http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 42a5445..476c65c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
   protected void chore() {
     try {
       TableDescriptors htds = master.getTableDescriptors();
-      Map<String, HTableDescriptor> map = htds.getAll();
-      for (HTableDescriptor htd : map.values()) {
+      Map<String, TableDescriptor> map = htds.getAll();
+      for (TableDescriptor htd : map.values()) {
         if (!master.getTableStateManager().isTableState(htd.getTableName(),
           TableState.State.ENABLED)) {
           continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
         final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
             MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.EXCLUSIVE,
             this.getClass().getName() + ": mob compaction");
-        for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+        for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
           if (!hcd.isMobEnabled()) {
             continue;
           }
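The change repeated across every file in this commit is the move from the mutable HTableDescriptor/HColumnDescriptor classes to the read-only TableDescriptor/ColumnFamilyDescriptor interfaces, with TableDescriptorBuilder used wherever a descriptor actually has to change, and the ProtobufUtil.toTableSchema/toTableDescriptor helpers replacing the older convertTo* names on the serialization paths. The sketch below only illustrates that pattern under stated assumptions; it is not code from the patch. The class and method names (DescriptorMigrationSketch, hasMobFamily, dropFamily) and the caller-supplied TableDescriptors/TableName arguments are hypothetical placeholders; the API calls themselves (get, add, getColumnFamilies, hasColumnFamily, TableDescriptorBuilder.newBuilder) are the ones used in the diffs that follow.

import java.io.IOException;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorMigrationSketch {

  // Read side: descriptors handed out by TableDescriptors are now the read-only
  // TableDescriptor / ColumnFamilyDescriptor interfaces.
  static boolean hasMobFamily(TableDescriptors htds, TableName table) throws IOException {
    TableDescriptor htd = htds.get(table);
    if (htd == null) {
      return false;
    }
    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
      if (hcd.isMobEnabled()) {
        return true;
      }
    }
    return false;
  }

  // Write side: instead of mutating the descriptor in place
  // (htd.addFamily / htd.removeFamily / htd.modifyFamily), copy it into a
  // TableDescriptorBuilder, apply the change, and register the rebuilt descriptor.
  static void dropFamily(TableDescriptors htds, TableName table, String family)
      throws IOException {
    TableDescriptor htd = htds.get(table);
    byte[] cf = Bytes.toBytes(family);
    if (htd != null && htd.hasColumnFamily(cf)) {
      htds.add(TableDescriptorBuilder.newBuilder(htd)
          .removeColumnFamily(cf)
          .build());
    }
  }
}

Rebuilding through the builder and re-registering the result is what keeps a published descriptor immutable, which is why the procedures below call TableDescriptors.add() with a freshly built descriptor rather than editing the cached one.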
http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 18f6856..fb83971 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -24,12 +24,12 @@ import java.util.Set; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; @@ -198,7 +198,7 @@ public class TableStateManager { public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection) throws IOException { - final Map<String, HTableDescriptor> allDescriptors = + final Map<String, TableDescriptor> allDescriptors = tableDescriptors.getAllDescriptors(); final Map<String, TableState> states = new HashMap<>(); MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() { @@ -210,7 +210,7 @@ public class TableStateManager { return true; } }); - for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) { + for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) { String table = entry.getKey(); if (table.equals(TableName.META_TABLE_NAME.getNameAsString())) continue; http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index 9aaf297..c398c9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -31,18 +31,18 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaMutationAnnotation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; import 
org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.master.CatalogJanitor; @@ -603,10 +603,10 @@ public class MergeTableRegionsProcedure throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); final Configuration conf = env.getMasterConfiguration(); - final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); for (String family: regionFs.getFamilies()) { - final HColumnDescriptor hcd = htd.getFamily(family.getBytes()); + final ColumnFamilyDescriptor hcd = htd.getColumnFamily(family.getBytes()); final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family); if (storeFiles != null && storeFiles.size() > 0) { @@ -682,7 +682,7 @@ public class MergeTableRegionsProcedure } private int getRegionReplication(final MasterProcedureEnv env) throws IOException { - final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); return htd.getRegionReplication(); } http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 627eb57..072800b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; @@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState.State; @@ -221,7 +221,7 @@ public class RegionStateStore { // ============================================================================================ public void splitRegion(final HRegionInfo parent, final HRegionInfo hriA, final HRegionInfo hriB, final ServerName serverName) throws IOException { - final HTableDescriptor htd = getTableDescriptor(parent.getTable()); + final TableDescriptor htd = getTableDescriptor(parent.getTable()); MetaTableAccessor.splitRegion(master.getConnection(), parent, hriA, hriB, serverName, getRegionReplication(htd), hasSerialReplicationScope(htd)); } @@ -231,7 +231,7 @@ public class RegionStateStore { // 
============================================================================================ public void mergeRegions(final HRegionInfo parent, final HRegionInfo hriA, final HRegionInfo hriB, final ServerName serverName) throws IOException { - final HTableDescriptor htd = getTableDescriptor(parent.getTable()); + final TableDescriptor htd = getTableDescriptor(parent.getTable()); MetaTableAccessor.mergeRegions(master.getConnection(), parent, hriA, hriB, serverName, getRegionReplication(htd), EnvironmentEdgeManager.currentTime(), hasSerialReplicationScope(htd)); @@ -255,15 +255,15 @@ public class RegionStateStore { return hasSerialReplicationScope(getTableDescriptor(tableName)); } - private boolean hasSerialReplicationScope(final HTableDescriptor htd) { + private boolean hasSerialReplicationScope(final TableDescriptor htd) { return (htd != null)? htd.hasSerialReplicationScope(): false; } - private int getRegionReplication(final HTableDescriptor htd) { + private int getRegionReplication(final TableDescriptor htd) { return (htd != null) ? htd.getRegionReplication() : 1; } - private HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException { + private TableDescriptor getTableDescriptor(final TableName tableName) throws IOException { return master.getTableDescriptors().get(tableName); } http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index d9a1ab8..5e3d8c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -34,10 +34,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -194,7 +194,7 @@ class RegionLocationFinder { */ protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) { try { - HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable()); + TableDescriptor tableDescriptor = getTableDescriptor(region.getTable()); if (tableDescriptor != null) { HDFSBlocksDistribution blocksDistribution = HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region); @@ -209,14 +209,14 @@ class RegionLocationFinder { } /** - * return HTableDescriptor for a given tableName + * return TableDescriptor for a given tableName * * @param tableName the table name - * @return HTableDescriptor + * @return TableDescriptor * @throws IOException */ - protected HTableDescriptor getTableDescriptor(TableName tableName) throws IOException { - HTableDescriptor tableDescriptor = null; + protected TableDescriptor getTableDescriptor(TableName tableName) throws IOException 
{ + TableDescriptor tableDescriptor = null; try { if (this.services != null && this.services.getTableDescriptors() != null) { tableDescriptor = this.services.getTableDescriptors().get(tableName); http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java index 45b2401..f1ff936 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java @@ -27,17 +27,17 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.util.Bytes; @@ -63,11 +63,11 @@ public class ReplicationMetaCleaner extends ScheduledChore { @Override protected void chore() { try { - Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors(); + Map<String, TableDescriptor> tables = master.getTableDescriptors().getAllDescriptors(); Map<String, Set<String>> serialTables = new HashMap<>(); - for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) { + for (Map.Entry<String, TableDescriptor> entry : tables.entrySet()) { boolean hasSerialScope = false; - for (HColumnDescriptor column : entry.getValue().getFamilies()) { + for (ColumnFamilyDescriptor column : entry.getValue().getColumnFamilies()) { if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) { hasSerialScope = true; break; http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java index 34c1853..f19195e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java @@ -25,12 +25,13 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import 
org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -45,30 +46,30 @@ public class AddColumnFamilyProcedure private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class); private TableName tableName; - private HTableDescriptor unmodifiedHTableDescriptor; - private HColumnDescriptor cfDescriptor; + private TableDescriptor unmodifiedTableDescriptor; + private ColumnFamilyDescriptor cfDescriptor; private List<HRegionInfo> regionInfoList; private Boolean traceEnabled; public AddColumnFamilyProcedure() { super(); - this.unmodifiedHTableDescriptor = null; + this.unmodifiedTableDescriptor = null; this.regionInfoList = null; this.traceEnabled = null; } public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName, - final HColumnDescriptor cfDescriptor) throws IOException { + final ColumnFamilyDescriptor cfDescriptor) throws IOException { this(env, tableName, cfDescriptor, null); } public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName, - final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) { + final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) { super(env, latch); this.tableName = tableName; this.cfDescriptor = cfDescriptor; - this.unmodifiedHTableDescriptor = null; + this.unmodifiedTableDescriptor = null; this.regionInfoList = null; this.traceEnabled = null; } @@ -172,10 +173,10 @@ public class AddColumnFamilyProcedure MasterProcedureProtos.AddColumnFamilyStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor)); - if (unmodifiedHTableDescriptor != null) { + .setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor)); + if (unmodifiedTableDescriptor != null) { addCFMsg - .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor)); + .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor)); } addCFMsg.build().writeDelimitedTo(stream); @@ -189,9 +190,9 @@ public class AddColumnFamilyProcedure MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo())); tableName = ProtobufUtil.toTableName(addCFMsg.getTableName()); - cfDescriptor = ProtobufUtil.convertToHColumnDesc(addCFMsg.getColumnfamilySchema()); + cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(addCFMsg.getColumnfamilySchema()); if (addCFMsg.hasUnmodifiedTableSchema()) { - unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(addCFMsg.getUnmodifiedTableSchema()); + unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(addCFMsg.getUnmodifiedTableSchema()); } } @@ -229,11 +230,11 @@ public class AddColumnFamilyProcedure checkTableModifiable(env); // In order to update the descriptor, we need to retrieve the old descriptor for comparison. 
- unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); - if (unmodifiedHTableDescriptor == null) { - throw new IOException("HTableDescriptor missing for " + tableName); + unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedTableDescriptor == null) { + throw new IOException("TableDescriptor missing for " + tableName); } - if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) { + if (unmodifiedTableDescriptor.hasColumnFamily(cfDescriptor.getName())) { throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName() + "' in table '" + tableName + "' already exists so cannot be added"); } @@ -258,17 +259,18 @@ public class AddColumnFamilyProcedure // Update table descriptor LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString()); - HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); - if (htd.hasFamily(cfDescriptor.getName())) { + if (htd.hasColumnFamily(cfDescriptor.getName())) { // It is possible to reach this situation, as we could already add the column family // to table descriptor, but the master failover happens before we complete this state. // We should be able to handle running this function multiple times without causing problem. return; } - htd.addFamily(cfDescriptor); - env.getMasterServices().getTableDescriptors().add(htd); + env.getMasterServices().getTableDescriptors().add( + TableDescriptorBuilder.newBuilder(htd) + .addColumnFamily(cfDescriptor).build()); } /** @@ -277,14 +279,14 @@ public class AddColumnFamilyProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); - if (htd.hasFamily(cfDescriptor.getName())) { + TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + if (htd.hasColumnFamily(cfDescriptor.getName())) { // Remove the column family from file system and update the table descriptor to // the before-add-column-family-state MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, getRegionInfoList(env), cfDescriptor.getName(), cfDescriptor.isMobEnabled()); - env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor); // Make sure regions are opened after table descriptor is updated. 
//reOpenAllRegionsIfTableIsOnline(env); http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index afe72e2..cc39f53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -33,11 +33,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -67,7 +68,7 @@ public class CloneSnapshotProcedure extends AbstractStateMachineTableProcedure<CloneSnapshotState> { private static final Log LOG = LogFactory.getLog(CloneSnapshotProcedure.class); - private HTableDescriptor hTableDescriptor; + private TableDescriptor tableDescriptor; private SnapshotDescription snapshot; private boolean restoreAcl; private List<HRegionInfo> newRegions = null; @@ -85,21 +86,21 @@ public class CloneSnapshotProcedure } public CloneSnapshotProcedure(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) { - this(env, hTableDescriptor, snapshot, false); + final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) { + this(env, tableDescriptor, snapshot, false); } /** * Constructor * @param env MasterProcedureEnv - * @param hTableDescriptor the table to operate on + * @param tableDescriptor the table to operate on * @param snapshot snapshot to clone from */ public CloneSnapshotProcedure(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot, + final TableDescriptor tableDescriptor, final SnapshotDescription snapshot, final boolean restoreAcl) { super(env); - this.hTableDescriptor = hTableDescriptor; + this.tableDescriptor = tableDescriptor; this.snapshot = snapshot; this.restoreAcl = restoreAcl; @@ -121,7 +122,7 @@ public class CloneSnapshotProcedure Configuration conf = env.getMasterServices().getConfiguration(); if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null && SnapshotDescriptionUtils.isSecurityAvailable(conf)) { - RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, hTableDescriptor.getTableName(), conf); + RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, tableDescriptor.getTableName(), conf); } } @@ -141,7 +142,7 @@ public class CloneSnapshotProcedure setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT); break; case CLONE_SNAPSHOT_WRITE_FS_LAYOUT: - newRegions = createFilesystemLayout(env, hTableDescriptor, newRegions); + newRegions = 
createFilesystemLayout(env, tableDescriptor, newRegions); setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META); break; case CLONE_SNAPSHOT_ADD_TO_META: @@ -224,7 +225,7 @@ public class CloneSnapshotProcedure @Override public TableName getTableName() { - return hTableDescriptor.getTableName(); + return tableDescriptor.getTableName(); } @Override @@ -250,7 +251,7 @@ public class CloneSnapshotProcedure MasterProcedureProtos.CloneSnapshotStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setSnapshot(this.snapshot) - .setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor)); + .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); if (newRegions != null) { for (HRegionInfo hri: newRegions) { cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri)); @@ -281,7 +282,7 @@ public class CloneSnapshotProcedure MasterProcedureProtos.CloneSnapshotStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo())); snapshot = cloneSnapshotMsg.getSnapshot(); - hTableDescriptor = ProtobufUtil.convertToHTableDesc(cloneSnapshotMsg.getTableSchema()); + tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema()); if (cloneSnapshotMsg.getRegionInfoCount() == 0) { newRegions = null; } else { @@ -341,7 +342,7 @@ public class CloneSnapshotProcedure final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { - cpHost.preCreateTableAction(hTableDescriptor, null, getUser()); + cpHost.preCreateTableAction(tableDescriptor, null, getUser()); } } @@ -357,7 +358,7 @@ public class CloneSnapshotProcedure if (cpHost != null) { final HRegionInfo[] regions = (newRegions == null) ? null : newRegions.toArray(new HRegionInfo[newRegions.size()]); - cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser()); + cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser()); } } @@ -368,9 +369,9 @@ public class CloneSnapshotProcedure */ private List<HRegionInfo> createFilesystemLayout( final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, final List<HRegionInfo> newRegions) throws IOException { - return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() { + return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() { @Override public List<HRegionInfo> createHdfsRegions( final MasterProcedureEnv env, @@ -390,7 +391,7 @@ public class CloneSnapshotProcedure Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot); RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper( - conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus); + conf, fs, manifest, tableDescriptor, tableRootDir, monitorException, monitorStatus); RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions(); // Clone operation should not have stuff to restore or remove @@ -429,7 +430,7 @@ public class CloneSnapshotProcedure */ private List<HRegionInfo> createFsLayout( final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, List<HRegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); @@ -437,17 +438,17 @@ public class CloneSnapshotProcedure // 1. 
Create Table Descriptor // using a copy of descriptor, table will be created enabling first - HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor); - final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName()); + final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName()); ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors())) - .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false); + .createTableDescriptorForTableDirectory(tempTableDir, + TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false); // 2. Create Regions newRegions = hdfsRegionHandler.createHdfsRegions( - env, tempdir, hTableDescriptor.getTableName(), newRegions); + env, tempdir, tableDescriptor.getTableName(), newRegions); // 3. Move Table temp directory to the hbase root location - CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir); + CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir); return newRegions; } @@ -458,11 +459,11 @@ public class CloneSnapshotProcedure * @throws IOException */ private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException { - newRegions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, newRegions); + newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions); RestoreSnapshotHelper.RestoreMetaChanges metaChanges = new RestoreSnapshotHelper.RestoreMetaChanges( - hTableDescriptor, parentsToChildrenPairMap); + tableDescriptor, parentsToChildrenPairMap); metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions); } http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index cf55463..14604fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -30,12 +30,12 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -55,7 +55,7 @@ public class CreateTableProcedure extends AbstractStateMachineTableProcedure<CreateTableState> { private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class); - private HTableDescriptor hTableDescriptor; + private TableDescriptor tableDescriptor; private List<HRegionInfo> newRegions; public CreateTableProcedure() { @@ -64,15 +64,15 @@ public class CreateTableProcedure } public CreateTableProcedure(final 
MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) { - this(env, hTableDescriptor, newRegions, null); + final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions) { + this(env, tableDescriptor, newRegions, null); } public CreateTableProcedure(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions, + final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions, final ProcedurePrepareLatch syncLatch) { super(env, syncLatch); - this.hTableDescriptor = hTableDescriptor; + this.tableDescriptor = tableDescriptor; this.newRegions = newRegions != null ? Lists.newArrayList(newRegions) : null; } @@ -98,11 +98,11 @@ public class CreateTableProcedure setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT); break; case CREATE_TABLE_WRITE_FS_LAYOUT: - newRegions = createFsLayout(env, hTableDescriptor, newRegions); + newRegions = createFsLayout(env, tableDescriptor, newRegions); setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META); break; case CREATE_TABLE_ADD_TO_META: - newRegions = addTableToMeta(env, hTableDescriptor, newRegions); + newRegions = addTableToMeta(env, tableDescriptor, newRegions); setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS); break; case CREATE_TABLE_ASSIGN_REGIONS: @@ -174,7 +174,7 @@ public class CreateTableProcedure @Override public TableName getTableName() { - return hTableDescriptor.getTableName(); + return tableDescriptor.getTableName(); } @Override @@ -189,7 +189,7 @@ public class CreateTableProcedure MasterProcedureProtos.CreateTableStateData.Builder state = MasterProcedureProtos.CreateTableStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) - .setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor)); + .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); if (newRegions != null) { for (HRegionInfo hri: newRegions) { state.addRegionInfo(HRegionInfo.convert(hri)); @@ -205,7 +205,7 @@ public class CreateTableProcedure MasterProcedureProtos.CreateTableStateData state = MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo())); - hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema()); + tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema()); if (state.getRegionInfoCount() == 0) { newRegions = null; } else { @@ -235,7 +235,7 @@ public class CreateTableProcedure } // check that we have at least 1 CF - if (hTableDescriptor.getColumnFamilyCount() == 0) { + if (tableDescriptor.getColumnFamilyCount() == 0) { setFailure("master-create-table", new DoNotRetryIOException("Table " + getTableName().toString() + " should have at least one column family.")); return false; @@ -256,7 +256,7 @@ public class CreateTableProcedure if (cpHost != null) { final HRegionInfo[] regions = newRegions == null ? null : newRegions.toArray(new HRegionInfo[newRegions.size()]); - cpHost.preCreateTableAction(hTableDescriptor, regions, getUser()); + cpHost.preCreateTableAction(tableDescriptor, regions, getUser()); } } @@ -266,7 +266,7 @@ public class CreateTableProcedure if (cpHost != null) { final HRegionInfo[] regions = (newRegions == null) ? 
null : newRegions.toArray(new HRegionInfo[newRegions.size()]); - cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser()); + cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser()); } } @@ -277,9 +277,9 @@ public class CreateTableProcedure } protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions) + final TableDescriptor tableDescriptor, final List<HRegionInfo> newRegions) throws IOException { - return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() { + return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() { @Override public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env, final Path tableRootDir, final TableName tableName, @@ -287,40 +287,40 @@ public class CreateTableProcedure HRegionInfo[] regions = newRegions != null ? newRegions.toArray(new HRegionInfo[newRegions.size()]) : null; return ModifyRegionUtils.createRegions(env.getMasterConfiguration(), - tableRootDir, hTableDescriptor, regions, null); + tableRootDir, tableDescriptor, regions, null); } }); } protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions, + final TableDescriptor tableDescriptor, List<HRegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); final Path tempdir = mfs.getTempDir(); // 1. Create Table Descriptor // using a copy of descriptor, table will be created enabling first - final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName()); + final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName()); ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors())) .createTableDescriptorForTableDirectory( - tempTableDir, hTableDescriptor, false); + tempTableDir, tableDescriptor, false); // 2. Create Regions newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir, - hTableDescriptor.getTableName(), newRegions); + tableDescriptor.getTableName(), newRegions); // 3. 
Move Table temp directory to the hbase root location - moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir); + moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir); return newRegions; } protected static void moveTempDirectoryToHBaseRoot( final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, final Path tempTableDir) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); - final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName()); + final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName()); FileSystem fs = mfs.getFileSystem(); if (!fs.delete(tableDir, true) && fs.exists(tableDir)) { throw new IOException("Couldn't delete " + tableDir); @@ -332,20 +332,20 @@ public class CreateTableProcedure } protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, final List<HRegionInfo> regions) throws IOException { assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions; ProcedureSyncWait.waitMetaRegions(env); // Add replicas if needed - List<HRegionInfo> newRegions = addReplicas(env, hTableDescriptor, regions); + List<HRegionInfo> newRegions = addReplicas(env, tableDescriptor, regions); // Add regions to META - addRegionsToMeta(env, hTableDescriptor, newRegions); + addRegionsToMeta(env, tableDescriptor, newRegions); // Setup replication for region replicas if needed - if (hTableDescriptor.getRegionReplication() > 1) { + if (tableDescriptor.getRegionReplication() > 1) { ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); } return newRegions; @@ -354,14 +354,14 @@ public class CreateTableProcedure /** * Create any replicas for the regions (the default replicas that was * already created is passed to the method) - * @param hTableDescriptor descriptor to use + * @param tableDescriptor descriptor to use * @param regions default replicas * @return the combined list of default and non-default replicas */ private static List<HRegionInfo> addReplicas(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, final List<HRegionInfo> regions) { - int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1; + int numRegionReplicas = tableDescriptor.getRegionReplication() - 1; if (numRegionReplicas <= 0) { return regions; } @@ -394,10 +394,10 @@ public class CreateTableProcedure * Add the specified set of regions to the hbase:meta table. 
*/ private static void addRegionsToMeta(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, final List<HRegionInfo> regionInfos) throws IOException { MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), - regionInfos, hTableDescriptor.getRegionReplication()); + regionInfos, tableDescriptor.getRegionReplication()); } protected static void updateTableDescCache(final MasterProcedureEnv env, http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java index 78bd715..9ec814a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java @@ -26,10 +26,11 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -45,7 +46,7 @@ public class DeleteColumnFamilyProcedure extends AbstractStateMachineTableProcedure<DeleteColumnFamilyState> { private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class); - private HTableDescriptor unmodifiedHTableDescriptor; + private TableDescriptor unmodifiedTableDescriptor; private TableName tableName; private byte [] familyName; private boolean hasMob; @@ -55,7 +56,7 @@ public class DeleteColumnFamilyProcedure public DeleteColumnFamilyProcedure() { super(); - this.unmodifiedHTableDescriptor = null; + this.unmodifiedTableDescriptor = null; this.regionInfoList = null; this.traceEnabled = null; } @@ -70,7 +71,7 @@ public class DeleteColumnFamilyProcedure super(env, latch); this.tableName = tableName; this.familyName = familyName; - this.unmodifiedHTableDescriptor = null; + this.unmodifiedTableDescriptor = null; this.regionInfoList = null; this.traceEnabled = null; } @@ -179,9 +180,9 @@ public class DeleteColumnFamilyProcedure .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setTableName(ProtobufUtil.toProtoTableName(tableName)) .setColumnfamilyName(UnsafeByteOperations.unsafeWrap(familyName)); - if (unmodifiedHTableDescriptor != null) { + if (unmodifiedTableDescriptor != null) { deleteCFMsg - .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor)); + .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor)); } deleteCFMsg.build().writeDelimitedTo(stream); @@ -197,7 +198,7 @@ public class DeleteColumnFamilyProcedure familyName = deleteCFMsg.getColumnfamilyName().toByteArray(); if (deleteCFMsg.hasUnmodifiedTableSchema()) { - 
unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(deleteCFMsg.getUnmodifiedTableSchema()); + unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(deleteCFMsg.getUnmodifiedTableSchema()); } } @@ -235,22 +236,22 @@ public class DeleteColumnFamilyProcedure checkTableModifiable(env); // In order to update the descriptor, we need to retrieve the old descriptor for comparison. - unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); - if (unmodifiedHTableDescriptor == null) { - throw new IOException("HTableDescriptor missing for " + tableName); + unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedTableDescriptor == null) { + throw new IOException("TableDescriptor missing for " + tableName); } - if (!unmodifiedHTableDescriptor.hasFamily(familyName)) { + if (!unmodifiedTableDescriptor.hasColumnFamily(familyName)) { throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + "' does not exist, so it cannot be deleted"); } - if (unmodifiedHTableDescriptor.getColumnFamilyCount() == 1) { + if (unmodifiedTableDescriptor.getColumnFamilyCount() == 1) { throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + "' is the only column family in the table, so it cannot be deleted"); } // whether mob family - hasMob = unmodifiedHTableDescriptor.getFamily(familyName).isMobEnabled(); + hasMob = unmodifiedTableDescriptor.getColumnFamily(familyName).isMobEnabled(); } /** @@ -272,17 +273,17 @@ public class DeleteColumnFamilyProcedure // Update table descriptor LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName()); - HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); - if (!htd.hasFamily(familyName)) { + if (!htd.hasColumnFamily(familyName)) { // It is possible to reach this situation, as we could already delete the column family // from table descriptor, but the master failover happens before we complete this state. // We should be able to handle running this function multiple times without causing problem. return; } - htd.removeFamily(familyName); - env.getMasterServices().getTableDescriptors().add(htd); + env.getMasterServices().getTableDescriptors().add( + TableDescriptorBuilder.newBuilder(htd).removeColumnFamily(familyName).build()); } /** @@ -291,7 +292,7 @@ public class DeleteColumnFamilyProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor); // Make sure regions are opened after table descriptor is updated. 
//reOpenAllRegionsIfTableIsOnline(env); http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java index 622c19f..ac86dab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java @@ -24,11 +24,12 @@ import java.io.OutputStream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -43,28 +44,28 @@ public class ModifyColumnFamilyProcedure private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class); private TableName tableName; - private HTableDescriptor unmodifiedHTableDescriptor; - private HColumnDescriptor cfDescriptor; + private TableDescriptor unmodifiedtableDescriptor; + private ColumnFamilyDescriptor cfDescriptor; private Boolean traceEnabled; public ModifyColumnFamilyProcedure() { super(); - this.unmodifiedHTableDescriptor = null; + this.unmodifiedtableDescriptor = null; this.traceEnabled = null; } public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName, - final HColumnDescriptor cfDescriptor) { + final ColumnFamilyDescriptor cfDescriptor) { this(env, tableName, cfDescriptor, null); } public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName, - final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) { + final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) { super(env, latch); this.tableName = tableName; this.cfDescriptor = cfDescriptor; - this.unmodifiedHTableDescriptor = null; + this.unmodifiedtableDescriptor = null; this.traceEnabled = null; } @@ -165,10 +166,10 @@ public class ModifyColumnFamilyProcedure MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor)); - if (unmodifiedHTableDescriptor != null) { + .setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor)); + if (unmodifiedtableDescriptor != null) { modifyCFMsg - .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor)); + .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedtableDescriptor)); } modifyCFMsg.build().writeDelimitedTo(stream); @@ -182,9 +183,9 @@ public class ModifyColumnFamilyProcedure 
MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo())); tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName()); - cfDescriptor = ProtobufUtil.convertToHColumnDesc(modifyCFMsg.getColumnfamilySchema()); + cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(modifyCFMsg.getColumnfamilySchema()); if (modifyCFMsg.hasUnmodifiedTableSchema()) { - unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyCFMsg.getUnmodifiedTableSchema()); + unmodifiedtableDescriptor = ProtobufUtil.toTableDescriptor(modifyCFMsg.getUnmodifiedTableSchema()); } } @@ -221,11 +222,11 @@ public class ModifyColumnFamilyProcedure // Checks whether the table is allowed to be modified. checkTableModifiable(env); - unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); - if (unmodifiedHTableDescriptor == null) { - throw new IOException("HTableDescriptor missing for " + tableName); + unmodifiedtableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedtableDescriptor == null) { + throw new IOException("TableDescriptor missing for " + tableName); } - if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) { + if (!unmodifiedtableDescriptor.hasColumnFamily(cfDescriptor.getName())) { throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + "' does not exist, so it cannot be modified"); } @@ -250,9 +251,9 @@ public class ModifyColumnFamilyProcedure // Update table descriptor LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString()); - HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); - htd.modifyFamily(cfDescriptor); - env.getMasterServices().getTableDescriptors().add(htd); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(env.getMasterServices().getTableDescriptors().get(tableName)); + builder.modifyColumnFamily(cfDescriptor); + env.getMasterServices().getTableDescriptors().add(builder.build()); } /** @@ -261,7 +262,7 @@ public class ModifyColumnFamilyProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + env.getMasterServices().getTableDescriptors().add(unmodifiedtableDescriptor); // Make sure regions are opened after table descriptor is updated. 
//reOpenAllRegionsIfTableIsOnline(env); http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 20a6a03..9741faa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -30,7 +30,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -40,6 +39,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -52,8 +52,8 @@ public class ModifyTableProcedure extends AbstractStateMachineTableProcedure<ModifyTableState> { private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class); - private HTableDescriptor unmodifiedHTableDescriptor = null; - private HTableDescriptor modifiedHTableDescriptor; + private TableDescriptor unmodifiedTableDescriptor = null; + private TableDescriptor modifiedTableDescriptor; private boolean deleteColumnFamilyInModify; private List<HRegionInfo> regionInfoList; @@ -64,19 +64,19 @@ public class ModifyTableProcedure initilize(); } - public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd) { + public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd) { this(env, htd, null); } - public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd, + public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd, final ProcedurePrepareLatch latch) { super(env, latch); initilize(); - this.modifiedHTableDescriptor = htd; + this.modifiedTableDescriptor = htd; } private void initilize() { - this.unmodifiedHTableDescriptor = null; + this.unmodifiedTableDescriptor = null; this.regionInfoList = null; this.traceEnabled = null; this.deleteColumnFamilyInModify = false; @@ -104,7 +104,7 @@ public class ModifyTableProcedure setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN); break; case MODIFY_TABLE_REMOVE_REPLICA_COLUMN: - updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor); + updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor); if (deleteColumnFamilyInModify) { setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT); } else { @@ -112,7 +112,7 @@ public class ModifyTableProcedure } break; case MODIFY_TABLE_DELETE_FS_LAYOUT: - deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor); + deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor); 
setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION); break; case MODIFY_TABLE_POST_OPERATION: @@ -191,12 +191,12 @@ public class ModifyTableProcedure MasterProcedureProtos.ModifyTableStateData.Builder modifyTableMsg = MasterProcedureProtos.ModifyTableStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) - .setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor)) + .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)) .setDeleteColumnFamilyInModify(deleteColumnFamilyInModify); - if (unmodifiedHTableDescriptor != null) { + if (unmodifiedTableDescriptor != null) { modifyTableMsg - .setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor)); + .setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor)); } modifyTableMsg.build().writeDelimitedTo(stream); @@ -209,18 +209,18 @@ public class ModifyTableProcedure MasterProcedureProtos.ModifyTableStateData modifyTableMsg = MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo())); - modifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyTableMsg.getModifiedTableSchema()); + modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(modifyTableMsg.getModifiedTableSchema()); deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify(); if (modifyTableMsg.hasUnmodifiedTableSchema()) { - unmodifiedHTableDescriptor = - ProtobufUtil.convertToHTableDesc(modifyTableMsg.getUnmodifiedTableSchema()); + unmodifiedTableDescriptor = + ProtobufUtil.toTableDescriptor(modifyTableMsg.getUnmodifiedTableSchema()); } } @Override public TableName getTableName() { - return modifiedHTableDescriptor.getTableName(); + return modifiedTableDescriptor.getTableName(); } @Override @@ -240,27 +240,27 @@ public class ModifyTableProcedure } // check that we have at least 1 CF - if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) { + if (modifiedTableDescriptor.getColumnFamilyCount() == 0) { throw new DoNotRetryIOException("Table " + getTableName().toString() + " should have at least one column family."); } // In order to update the descriptor, we need to retrieve the old descriptor for comparison. - this.unmodifiedHTableDescriptor = + this.unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(getTableName()); if (env.getMasterServices().getTableStateManager() .isTableState(getTableName(), TableState.State.ENABLED)) { - if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor + if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor .getRegionReplication()) { throw new IOException("REGION_REPLICATION change is not supported for enabled tables"); } } - // Find out whether all column families in unmodifiedHTableDescriptor also exists in - // the modifiedHTableDescriptor. This is to determine whether we are safe to rollback. - final Set<byte[]> oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys(); - final Set<byte[]> newFamilies = modifiedHTableDescriptor.getFamiliesKeys(); + // Find out whether all column families in unmodifiedTableDescriptor also exists in + // the modifiedTableDescriptor. This is to determine whether we are safe to rollback. 
+ final Set<byte[]> oldFamilies = unmodifiedTableDescriptor.getColumnFamilyNames(); + final Set<byte[]> newFamilies = modifiedTableDescriptor.getColumnFamilyNames(); for (byte[] familyName : oldFamilies) { if (!newFamilies.contains(familyName)) { this.deleteColumnFamilyInModify = true; @@ -287,7 +287,7 @@ public class ModifyTableProcedure * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor); + env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor); } /** @@ -296,10 +296,10 @@ public class ModifyTableProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor); - // delete any new column families from the modifiedHTableDescriptor. - deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor); + // delete any new column families from the modifiedTableDescriptor. + deleteFromFs(env, modifiedTableDescriptor, unmodifiedTableDescriptor); // Make sure regions are opened after table descriptor is updated. //reOpenAllRegionsIfTableIsOnline(env); @@ -312,18 +312,17 @@ public class ModifyTableProcedure * @throws IOException */ private void deleteFromFs(final MasterProcedureEnv env, - final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor) + final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor) throws IOException { - final Set<byte[]> oldFamilies = oldHTableDescriptor.getFamiliesKeys(); - final Set<byte[]> newFamilies = newHTableDescriptor.getFamiliesKeys(); + final Set<byte[]> oldFamilies = oldTableDescriptor.getColumnFamilyNames(); + final Set<byte[]> newFamilies = newTableDescriptor.getColumnFamilyNames(); for (byte[] familyName : oldFamilies) { if (!newFamilies.contains(familyName)) { MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem( env, getTableName(), getRegionInfoList(env), - familyName, - oldHTableDescriptor.getFamily(familyName).isMobEnabled()); + familyName, oldTableDescriptor.getColumnFamily(familyName).isMobEnabled()); } } } @@ -335,10 +334,10 @@ public class ModifyTableProcedure */ private void updateReplicaColumnsIfNeeded( final MasterProcedureEnv env, - final HTableDescriptor oldHTableDescriptor, - final HTableDescriptor newHTableDescriptor) throws IOException { - final int oldReplicaCount = oldHTableDescriptor.getRegionReplication(); - final int newReplicaCount = newHTableDescriptor.getRegionReplication(); + final TableDescriptor oldTableDescriptor, + final TableDescriptor newTableDescriptor) throws IOException { + final int oldReplicaCount = oldTableDescriptor.getRegionReplication(); + final int newReplicaCount = newTableDescriptor.getRegionReplication(); if (newReplicaCount < oldReplicaCount) { Set<byte[]> tableRows = new HashSet<>(); @@ -402,10 +401,10 @@ public class ModifyTableProcedure if (cpHost != null) { switch (state) { case MODIFY_TABLE_PRE_OPERATION: - cpHost.preModifyTableAction(getTableName(), modifiedHTableDescriptor, getUser()); + cpHost.preModifyTableAction(getTableName(), modifiedTableDescriptor, getUser()); break; case MODIFY_TABLE_POST_OPERATION: - cpHost.postCompletedModifyTableAction(getTableName(), modifiedHTableDescriptor,getUser()); + cpHost.postCompletedModifyTableAction(getTableName(), 
modifiedTableDescriptor,getUser()); break; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index cfd9df9..4930396 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -33,12 +33,12 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -61,7 +61,7 @@ public class RestoreSnapshotProcedure extends AbstractStateMachineTableProcedure<RestoreSnapshotState> { private static final Log LOG = LogFactory.getLog(RestoreSnapshotProcedure.class); - private HTableDescriptor modifiedHTableDescriptor; + private TableDescriptor modifiedTableDescriptor; private List<HRegionInfo> regionsToRestore = null; private List<HRegionInfo> regionsToRemove = null; private List<HRegionInfo> regionsToAdd = null; @@ -82,24 +82,24 @@ public class RestoreSnapshotProcedure } public RestoreSnapshotProcedure(final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) { - this(env, hTableDescriptor, snapshot, false); + final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) { + this(env, tableDescriptor, snapshot, false); } /** * Constructor * @param env MasterProcedureEnv - * @param hTableDescriptor the table to operate on + * @param tableDescriptor the table to operate on * @param snapshot snapshot to restore from * @throws IOException */ public RestoreSnapshotProcedure( final MasterProcedureEnv env, - final HTableDescriptor hTableDescriptor, + final TableDescriptor tableDescriptor, final SnapshotDescription snapshot, final boolean restoreAcl) { super(env); // This is the new schema we are going to write out as this modification. 
- this.modifiedHTableDescriptor = hTableDescriptor; + this.modifiedTableDescriptor = tableDescriptor; // Snapshot information this.snapshot = snapshot; this.restoreAcl = restoreAcl; @@ -204,7 +204,7 @@ public class RestoreSnapshotProcedure @Override public TableName getTableName() { - return modifiedHTableDescriptor.getTableName(); + return modifiedTableDescriptor.getTableName(); } @Override @@ -236,7 +236,7 @@ public class RestoreSnapshotProcedure MasterProcedureProtos.RestoreSnapshotStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setSnapshot(this.snapshot) - .setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor)); + .setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor)); if (regionsToRestore != null) { for (HRegionInfo hri: regionsToRestore) { @@ -278,8 +278,8 @@ public class RestoreSnapshotProcedure MasterProcedureProtos.RestoreSnapshotStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo())); snapshot = restoreSnapshotMsg.getSnapshot(); - modifiedHTableDescriptor = - ProtobufUtil.convertToHTableDesc(restoreSnapshotMsg.getModifiedTableSchema()); + modifiedTableDescriptor = + ProtobufUtil.toTableDescriptor(restoreSnapshotMsg.getModifiedTableSchema()); if (restoreSnapshotMsg.getRegionInfoForRestoreCount() == 0) { regionsToRestore = null; @@ -333,7 +333,7 @@ public class RestoreSnapshotProcedure env.getMasterServices().checkTableModifiable(tableName); // Check that we have at least 1 CF - if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) { + if (modifiedTableDescriptor.getColumnFamilyCount() == 0) { throw new DoNotRetryIOException("Table " + getTableName().toString() + " should have at least one column family."); } @@ -363,7 +363,7 @@ public class RestoreSnapshotProcedure * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor); + env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor); } /** @@ -386,7 +386,7 @@ public class RestoreSnapshotProcedure env.getMasterServices().getConfiguration(), fs, manifest, - modifiedHTableDescriptor, + modifiedTableDescriptor, rootDir, monitorException, getMonitorStatus()); @@ -440,19 +440,19 @@ public class RestoreSnapshotProcedure MetaTableAccessor.addRegionsToMeta( conn, regionsToAdd, - modifiedHTableDescriptor.getRegionReplication()); + modifiedTableDescriptor.getRegionReplication()); } if (regionsToRestore != null) { MetaTableAccessor.overwriteRegions( conn, regionsToRestore, - modifiedHTableDescriptor.getRegionReplication()); + modifiedTableDescriptor.getRegionReplication()); } RestoreSnapshotHelper.RestoreMetaChanges metaChanges = new RestoreSnapshotHelper.RestoreMetaChanges( - modifiedHTableDescriptor, parentsToChildrenPairMap); + modifiedTableDescriptor, parentsToChildrenPairMap); metaChanges.updateMetaParentRegions(conn, regionsToAdd); // At this point the restore is complete. 
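Note: the pattern running through ModifyColumnFamilyProcedure, ModifyTableProcedure, and RestoreSnapshotProcedure above is that the mutable HTableDescriptor is replaced by the read-only TableDescriptor interface, so in-place mutation (htd.modifyFamily(...)) gives way to building a fresh descriptor with TableDescriptorBuilder. The following is a minimal sketch of that call pattern only, not code from this patch; the class name, table/family names, and the setMaxVersions tweak are illustrative, assuming the 2.0-era builder API shown in the hunks above.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorBuilderSketch {
  static TableDescriptor bumpMaxVersions(TableDescriptor current, String family) {
    // Derive a new ColumnFamilyDescriptor from the existing one instead of mutating it.
    ColumnFamilyDescriptor cf = current.getColumnFamily(Bytes.toBytes(family));
    ColumnFamilyDescriptor modified =
        ColumnFamilyDescriptorBuilder.newBuilder(cf).setMaxVersions(5).build();
    // Rebuild the table descriptor around the modified family, mirroring
    // ModifyColumnFamilyProcedure.updateTableDescriptor() in the diff above.
    return TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(modified)
        .build();
  }
}
```

The resulting TableDescriptor is then handed to env.getMasterServices().getTableDescriptors().add(...) exactly as the procedures above do; the old descriptor object is never changed, which is what makes keeping an "unmodified" copy for rollback safe.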
http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java index e7f5ead..506c67d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -28,11 +28,11 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -48,7 +48,7 @@ public class TruncateTableProcedure private boolean preserveSplits; private List<HRegionInfo> regions; - private HTableDescriptor hTableDescriptor; + private TableDescriptor tableDescriptor; private TableName tableName; public TruncateTableProcedure() { @@ -95,7 +95,7 @@ public class TruncateTableProcedure setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META); break; case TRUNCATE_TABLE_REMOVE_FROM_META: - hTableDescriptor = env.getMasterServices().getTableDescriptors() + tableDescriptor = env.getMasterServices().getTableDescriptors() .get(tableName); DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions); DeleteTableProcedure.deleteAssignmentState(env, getTableName()); @@ -105,26 +105,26 @@ public class TruncateTableProcedure DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true); if (!preserveSplits) { // if we are not preserving splits, generate a new single region - regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null)); + regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(tableDescriptor, null)); } else { regions = recreateRegionInfo(regions); } setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT); break; case TRUNCATE_TABLE_CREATE_FS_LAYOUT: - regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions); + regions = CreateTableProcedure.createFsLayout(env, tableDescriptor, regions); CreateTableProcedure.updateTableDescCache(env, getTableName()); setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META); break; case TRUNCATE_TABLE_ADD_TO_META: - regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions); + regions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, regions); setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS); break; case TRUNCATE_TABLE_ASSIGN_REGIONS: CreateTableProcedure.setEnablingState(env, getTableName()); addChildProcedure(env.getAssignmentManager().createAssignProcedures(regions)); setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION); - hTableDescriptor = null; + tableDescriptor = null; regions = null; break; case TRUNCATE_TABLE_POST_OPERATION: @@ -216,8 +216,8 @@ public 
class TruncateTableProcedure MasterProcedureProtos.TruncateTableStateData.newBuilder() .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser())) .setPreserveSplits(preserveSplits); - if (hTableDescriptor != null) { - state.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor)); + if (tableDescriptor != null) { + state.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); } else { state.setTableName(ProtobufUtil.toProtoTableName(tableName)); } @@ -237,8 +237,8 @@ public class TruncateTableProcedure MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream); setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo())); if (state.hasTableSchema()) { - hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema()); - tableName = hTableDescriptor.getTableName(); + tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema()); + tableName = tableDescriptor.getTableName(); } else { tableName = ProtobufUtil.toTableName(state.getTableName()); } http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java index 0448f92..e8131af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java @@ -30,9 +30,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -137,16 +137,16 @@ public final class MasterSnapshotVerifier { * @param manifest snapshot manifest to inspect */ private void verifyTableInfo(final SnapshotManifest manifest) throws IOException { - HTableDescriptor htd = manifest.getTableDescriptor(); + TableDescriptor htd = manifest.getTableDescriptor(); if (htd == null) { throw new CorruptedSnapshotException("Missing Table Descriptor", ProtobufUtil.createSnapshotDesc(snapshot)); } - if (!htd.getNameAsString().equals(snapshot.getTable())) { + if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) { throw new CorruptedSnapshotException( "Invalid Table Descriptor. 
Expected " + snapshot.getTable() + " name, got " - + htd.getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot)); + + htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot)); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index b81c7db..b503d61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -38,12 +38,13 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -555,7 +556,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable cleanupSentinels(); // check to see if the table exists - HTableDescriptor desc = null; + TableDescriptor desc = null; try { desc = master.getTableDescriptors().get( TableName.valueOf(snapshot.getTable())); @@ -679,10 +680,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @throws IOException */ private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName, - final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc, + final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc, final NonceKey nonceKey, final boolean restoreAcl) throws IOException { MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); - HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc); + TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc); if (cpHost != null) { cpHost.preCloneSnapshot(reqSnapshot, htd); } @@ -707,14 +708,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * The operation will fail if the destination table has a snapshot or restore in progress. 
* * @param snapshot Snapshot Descriptor - * @param hTableDescriptor Table Descriptor of the table to create + * @param tableDescriptor Table Descriptor of the table to create * @param nonceKey unique identifier to prevent duplicated RPC * @return procId the ID of the clone snapshot procedure */ synchronized long cloneSnapshot(final SnapshotDescription snapshot, - final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) + final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) throws HBaseSnapshotException { - TableName tableName = hTableDescriptor.getTableName(); + TableName tableName = tableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table if (isTakingSnapshot(tableName)) { @@ -729,7 +730,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable try { long procId = master.getMasterProcedureExecutor().submitProcedure( new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(), - hTableDescriptor, snapshot, restoreAcl), + tableDescriptor, snapshot, restoreAcl), nonceKey); this.restoreTableToProcIdMap.put(tableName, procId); return procId; @@ -765,7 +766,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot); - HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor(); + TableDescriptor snapshotTableDesc = manifest.getTableDescriptor(); TableName tableName = TableName.valueOf(reqSnapshot.getTable()); // stop tracking "abandoned" handlers @@ -799,7 +800,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @throws IOException */ private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName, - final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc, + final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc, final NonceKey nonceKey, final boolean restoreAcl) throws IOException { MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); @@ -836,15 +837,15 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * Restore the specified snapshot. The restore will fail if the destination table has a snapshot * or restore in progress. 
* @param snapshot Snapshot Descriptor - * @param hTableDescriptor Table Descriptor + * @param tableDescriptor Table Descriptor * @param nonceKey unique identifier to prevent duplicated RPC * @param restoreAcl true to restore acl of snapshot * @return procId the ID of the restore snapshot procedure */ private synchronized long restoreSnapshot(final SnapshotDescription snapshot, - final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) + final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl) throws HBaseSnapshotException { - final TableName tableName = hTableDescriptor.getTableName(); + final TableName tableName = tableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table if (isTakingSnapshot(tableName)) { @@ -859,7 +860,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable try { long procId = master.getMasterProcedureExecutor().submitProcedure( new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(), - hTableDescriptor, snapshot, restoreAcl), + tableDescriptor, snapshot, restoreAcl), nonceKey); this.restoreTableToProcIdMap.put(tableName, procId); return procId;
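Note: in SnapshotManager.cloneSnapshot the old `new HTableDescriptor(tableName, snapshotTableDesc)` copy-constructor becomes `TableDescriptorBuilder.copy(tableName, snapshotTableDesc)`, i.e. the snapshot's schema is re-stamped with the clone's table name without mutating anything. A minimal sketch of that usage follows; the wrapper class and names are illustrative, assuming only the copy() call visible in the diff above.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class CloneSchemaSketch {
  static TableDescriptor cloneSchema(TableDescriptor snapshotTableDesc, String newTable) {
    // copy() keeps the column families and table attributes from the snapshot
    // schema but applies the clone's TableName to a new, immutable descriptor.
    return TableDescriptorBuilder.copy(TableName.valueOf(newTable), snapshotTableDesc);
  }
}
```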