http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index e7157d0..5d3b50b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -332,7 +332,7 @@ public class CompactSplit implements CompactionRequestor, PropagatingConfigurati
       final String why, int priority, CompactionRequest request, boolean selectNow, User user)
           throws IOException {
     if (this.server.isStopped()
-        || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled())) {
+        || (r.getTableDescriptor() != null && !r.getTableDescriptor().isCompactionEnabled())) {
       return null;
     }
 
@@ -345,7 +345,7 @@ public class CompactSplit implements CompactionRequestor, PropagatingConfigurati
     final RegionServerSpaceQuotaManager spaceQuotaManager =
       this.server.getRegionServerSpaceQuotaManager();
     if (spaceQuotaManager != null && spaceQuotaManager.areCompactionsDisabled(
-        r.getTableDesc().getTableName())) {
+        r.getTableDescriptor().getTableName())) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Ignoring compaction request for " + r + " as an active space quota violation "
             + " policy disallows compactions.");
@@ -562,7 +562,7 @@ public class CompactSplit implements CompactionRequestor, PropagatingConfigurati
     public void run() {
       Preconditions.checkNotNull(server);
       if (server.isStopped()
-          || (region.getTableDesc() != null && !region.getTableDesc().isCompactionEnabled())) {
+          || (region.getTableDescriptor() != null && !region.getTableDescriptor().isCompactionEnabled())) {
         return;
       }
       doCompaction(user);
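
The hunks above show the core rename in this commit: callers move from Region#getTableDesc(), which returned the deprecated HTableDescriptor, to Region#getTableDescriptor(), which returns the read-only org.apache.hadoop.hbase.client.TableDescriptor interface. A minimal caller-side sketch (the helper itself is illustrative, not part of this patch; it mirrors the null-safe guard used in CompactSplit):

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.regionserver.Region;

    final class CompactionGuard {
      // Hypothetical helper: a null descriptor counts as "enabled",
      // matching the patched check above.
      static boolean compactionsAllowed(Region r) {
        TableDescriptor td = r.getTableDescriptor();
        return td == null || td.isCompactionEnabled();
      }
    }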

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 5b9372a..f07009c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -408,7 +408,7 @@ public class CompactingMemStore extends AbstractMemStore {
   }
 
   private byte[] getFamilyNameInBytes() {
-    return store.getFamily().getName();
+    return store.getColumnFamilyDescriptor().getName();
   }
 
   private ThreadPoolExecutor getPool() {
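
The same renaming happens on the store side throughout this commit: Store#getFamily(), which returned HColumnDescriptor, becomes Store#getColumnFamilyDescriptor(), returning the org.apache.hadoop.hbase.client.ColumnFamilyDescriptor interface. A hedged caller-side sketch (the helper and its output format are illustrative):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.regionserver.Store;

    final class FamilyInfo {
      // Hypothetical helper: read family metadata through the new accessor.
      static String describe(Store store) {
        ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor();
        return cfd.getNameAsString() + " maxVersions=" + cfd.getMaxVersions();
      }
    }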

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
index d915f2e..324c1de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 
 /**
  * A {@link RegionSplitPolicy} implementation which splits a region
@@ -47,7 +48,7 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
   protected void configureForRegion(HRegion region) {
     super.configureForRegion(region);
     Configuration conf = getConf();
-    HTableDescriptor desc = region.getTableDesc();
+    TableDescriptor desc = region.getTableDescriptor();
     if (desc != null) {
       this.desiredMaxFileSize = desc.getMaxFileSize();
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index ef49f29..21f93ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -64,7 +64,7 @@ public class DefaultStoreFlusher extends StoreFlusher {
       synchronized (flushLock) {
         status.setStatus("Flushing " + store + ": creating writer");
         // Write the map out to the disk
-        writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompressionType(),
+        writer = store.createWriterInTmp(cellsCount, store.getColumnFamilyDescriptor().getCompressionType(),
             /* isCompaction = */ false,
             /* includeMVCCReadpoint = */ true,
             /* includesTags = */ snapshot.isTagsPresent(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
index 36e7929..daba069 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
@@ -51,9 +51,9 @@ public class DelimitedKeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundR
   protected void configureForRegion(HRegion region) {
     super.configureForRegion(region);
     // read the prefix length from the table descriptor
-    String delimiterString = region.getTableDesc().getValue(DELIMITER_KEY);
+    String delimiterString = region.getTableDescriptor().getValue(DELIMITER_KEY);
     if (delimiterString == null || delimiterString.length() == 0) {
-      LOG.error(DELIMITER_KEY + " not specified for table " + region.getTableDesc().getTableName() +
+      LOG.error(DELIMITER_KEY + " not specified for table " + region.getTableDescriptor().getTableName() +
         ". Using default RegionSplitPolicy");
       return;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
index 6138f5f..3bfec52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
@@ -38,7 +38,7 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy{
   @Override
   protected void configureForRegion(HRegion region) {
     super.configureForRegion(region);
-    int familyNumber = region.getTableDesc().getFamilies().size();
+    int familyNumber = region.getTableDescriptor().getColumnFamilyCount();
     if (familyNumber <= 1) {
       // No need to parse and set flush size lower bound if only one family
       // Family number might also be zero in some of our unit test case
@@ -50,7 +50,7 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy{
   @Override
   public Collection<Store> selectStoresToFlush() {
     // no need to select stores if only one family
-    if (region.getTableDesc().getFamilies().size() == 1) {
+    if (region.getTableDescriptor().getColumnFamilyCount() == 1) {
       return region.stores.values();
     }
     // start selection
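
Both hunks above also illustrate how family enumeration changes: getFamilies() and getFamiliesKeys() on the old HTableDescriptor are replaced by getColumnFamilies(), getColumnFamilyNames() and getColumnFamilyCount() on TableDescriptor. A sketch of the replacements side by side (the method is illustrative; every accessor it calls appears elsewhere in this commit):

    import java.util.Set;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class FamilyEnumeration {
      static void enumerate(TableDescriptor htd) {
        int count = htd.getColumnFamilyCount();         // was getFamilies().size()
        Set<byte[]> names = htd.getColumnFamilyNames(); // was getFamiliesKeys()
        for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) { // was getFamilies()
          System.out.println(cfd.getNameAsString() + " of " + count
              + " families (" + names.size() + " names)");
        }
      }
    }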

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java
index 119fdb5..f8850ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java
@@ -43,7 +43,7 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
   protected long flushSizeLowerBound = -1;
 
   protected long getFlushSizeLowerBound(HRegion region) {
-    int familyNumber = region.getTableDesc().getFamilies().size();
+    int familyNumber = region.getTableDescriptor().getColumnFamilyCount();
     // For multiple families, lower bound is the "average flush size" by default
     // unless setting in configuration is larger.
     long flushSizeLowerBound = region.getMemstoreFlushSize() / familyNumber;
@@ -55,11 +55,11 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
     }
     // use the setting in table description if any
     String flushedSizeLowerBoundString =
-        region.getTableDesc().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
+        region.getTableDescriptor().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
     if (flushedSizeLowerBoundString == null) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("No " + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
-            + " set in description of table " + region.getTableDesc().getTableName()
+            + " set in description of table " + region.getTableDescriptor().getTableName()
             + ", use config (" + flushSizeLowerBound + ") instead");
       }
     } else {
@@ -69,7 +69,7 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
         // fall back for fault setting
         LOG.warn("Number format exception when parsing "
             + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND + " for table "
-            + region.getTableDesc().getTableName() + ":" + flushedSizeLowerBoundString + ". " + nfe
+            + region.getTableDescriptor().getTableName() + ":" + flushedSizeLowerBoundString + ". " + nfe
             + ", use config (" + flushSizeLowerBound + ") instead");
 
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
index b93594e..64ca0c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /**
@@ -47,7 +48,7 @@ public class FlushPolicyFactory {
    * Create the FlushPolicy configured for the given table.
    */
   public static FlushPolicy create(HRegion region, Configuration conf) throws IOException {
-    Class<? extends FlushPolicy> clazz = getFlushPolicyClass(region.getTableDesc(), conf);
+    Class<? extends FlushPolicy> clazz = getFlushPolicyClass(region.getTableDescriptor(), conf);
     FlushPolicy policy = ReflectionUtils.newInstance(clazz, conf);
     policy.configureForRegion(region);
     return policy;
@@ -56,7 +57,7 @@ public class FlushPolicyFactory {
   /**
    * Get FlushPolicy class for the given table.
    */
-  public static Class<? extends FlushPolicy> getFlushPolicyClass(HTableDescriptor htd,
+  public static Class<? extends FlushPolicy> getFlushPolicyClass(TableDescriptor htd,
       Configuration conf) throws IOException {
     String className = htd.getFlushPolicyClassName();
     if (className == null) {
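
With getFlushPolicyClass now typed against the TableDescriptor interface, any descriptor implementation can be passed in. A hedged usage sketch (the configuration setup is illustrative; getFlushPolicyClassName() and the factory method signature are taken from the hunks above):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.regionserver.FlushPolicy;
    import org.apache.hadoop.hbase.regionserver.FlushPolicyFactory;

    final class PolicyLookup {
      static Class<? extends FlushPolicy> resolve(TableDescriptor htd) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Falls back to the configured default when the table descriptor
        // carries no flush-policy class name of its own.
        return FlushPolicyFactory.getFlushPolicyClass(htd, conf);
      }
    }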

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index c240df3..fb837e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -91,7 +92,7 @@ public class HMobStore extends HStore {
   private volatile long mobFlushedCellsSize = 0;
   private volatile long mobScanCellsCount = 0;
   private volatile long mobScanCellsSize = 0;
-  private HColumnDescriptor family;
+  private ColumnFamilyDescriptor family;
   private Map<String, List<Path>> map = new ConcurrentHashMap<>();
   private final IdLock keyLock = new IdLock();
   // When we add a MOB reference cell to the HFile, we will add 2 tags along with it
@@ -102,7 +103,7 @@ public class HMobStore extends HStore {
   // cloning snapshot for mob files.
   private final byte[] refCellTags;
 
-  public HMobStore(final HRegion region, final HColumnDescriptor family,
+  public HMobStore(final HRegion region, final ColumnFamilyDescriptor family,
       final Configuration confParam) throws IOException {
     super(region, family, confParam);
     this.family = family;
@@ -112,7 +113,7 @@ public class HMobStore extends HStore {
         family.getNameAsString());
     List<Path> locations = new ArrayList<>(2);
     locations.add(mobFamilyPath);
-    TableName tn = region.getTableDesc().getTableName();
+    TableName tn = region.getTableDescriptor().getTableName();
     locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn)
         .getEncodedName(), family.getNameAsString()));
     map.put(Bytes.toString(tn.getName()), locations);
@@ -128,7 +129,7 @@ public class HMobStore extends HStore {
    * Creates the mob cache config.
    */
   @Override
-  protected void createCacheConf(HColumnDescriptor family) {
+  protected void createCacheConf(ColumnFamilyDescriptor family) {
     cacheConf = new MobCacheConfig(conf, family);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b460d1a..b02b042 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -88,12 +88,10 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -105,6 +103,7 @@ import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -117,6 +116,8 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
@@ -635,7 +636,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   // Coprocessor host
   private RegionCoprocessorHost coprocessorHost;
 
-  private HTableDescriptor htableDescriptor = null;
+  private TableDescriptor htableDescriptor = null;
   private RegionSplitPolicy splitPolicy;
   private FlushPolicy flushPolicy;
 
@@ -675,7 +676,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   @VisibleForTesting
   public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
       final Configuration confParam, final HRegionInfo regionInfo,
-      final HTableDescriptor htd, final RegionServerServices rsServices) {
+      final TableDescriptor htd, final RegionServerServices rsServices) {
     this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo),
       wal, confParam, htd, rsServices);
   }
@@ -697,7 +698,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @param rsServices reference to {@link RegionServerServices} or null
    */
   public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
-      final HTableDescriptor htd, final RegionServerServices rsServices) {
+      final TableDescriptor htd, final RegionServerServices rsServices) {
     if (htd == null) {
       throw new IllegalArgumentException("Need table descriptor");
     }
@@ -727,10 +728,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     this.isLoadingCfsOnDemandDefault = conf.getBoolean(LOAD_CFS_ON_DEMAND_CONFIG_KEY, true);
     this.htableDescriptor = htd;
-    Set<byte[]> families = this.htableDescriptor.getFamiliesKeys();
+    Set<byte[]> families = this.htableDescriptor.getColumnFamilyNames();
     for (byte[] family : families) {
       if (!replicationScope.containsKey(family)) {
-        int scope = htd.getFamily(family).getScope();
+        int scope = htd.getColumnFamily(family).getScope();
         // Only store those families that has NON-DEFAULT scope
         if (scope != REPLICATION_SCOPE_LOCAL) {
           // Do a copy before storing it here.
@@ -826,7 +827,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     if (flushSize <= 0) {
       flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
-        HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+        TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
     }
     this.memstoreFlushSize = flushSize;
     this.blockingMemStoreSize = this.memstoreFlushSize *
@@ -858,7 +859,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     //Refuse to open the region if there is no column family in the table
     if (htableDescriptor.getColumnFamilyCount() == 0) {
-      throw new DoNotRetryIOException("Table " + htableDescriptor.getNameAsString() +
+      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
           " should have at least one column family.");
     }
 
@@ -987,14 +988,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     // initialized to -1 so that we pick up MemstoreTS from column families
     long maxMemstoreTS = -1;
 
-    if (!htableDescriptor.getFamilies().isEmpty()) {
+    if (htableDescriptor.getColumnFamilyCount() != 0) {
       // initialize the thread pool for opening stores in parallel.
       ThreadPoolExecutor storeOpenerThreadPool =
         getStoreOpenAndCloseThreadPool("StoreOpener-" + this.getRegionInfo().getShortNameToLog());
       CompletionService<HStore> completionService = new ExecutorCompletionService<>(storeOpenerThreadPool);
 
       // initialize each store in parallel
-      for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
+      for (final ColumnFamilyDescriptor family : htableDescriptor.getColumnFamilies()) {
         status.setStatus("Instantiating store for column family " + family);
         completionService.submit(new Callable<HStore>() {
           @Override
@@ -1006,10 +1007,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       boolean allStoresOpened = false;
       boolean hasSloppyStores = false;
       try {
-        for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
+        for (int i = 0; i < htableDescriptor.getColumnFamilyCount(); i++) {
           Future<HStore> future = completionService.take();
           HStore store = future.get();
-          this.stores.put(store.getFamily().getName(), store);
+          this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
           if (store.isSloppyMemstore()) {
             hasSloppyStores = true;
           }
@@ -1027,8 +1028,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         }
         allStoresOpened = true;
         if(hasSloppyStores) {
-          htableDescriptor.setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class
-              .getName());
+          htableDescriptor = TableDescriptorBuilder.newBuilder(htableDescriptor)
+                  .setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class.getName())
+                  .build();
           LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this);
         }
       } catch (InterruptedException e) {
@@ -1076,7 +1078,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       for (StoreFile storeFile: storeFiles) {
         storeFileNames.add(storeFile.getPath());
       }
-      allStoreFiles.put(store.getFamily().getName(), storeFileNames);
+      allStoreFiles.put(store.getColumnFamilyDescriptor().getName(), storeFileNames);
     }
     return allStoreFiles;
   }
@@ -1146,13 +1148,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   /**
    * This is a helper function to compute HDFS block distribution on demand
    * @param conf configuration
-   * @param tableDescriptor HTableDescriptor of the table
+   * @param tableDescriptor TableDescriptor of the table
    * @param regionInfo encoded name of the region
    * @return The HDFS blocks distribution for the given region.
    * @throws IOException
    */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
-      final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
+      final TableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
     Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableDescriptor.getTableName());
     return computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo, tablePath);
   }
@@ -1160,20 +1162,20 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   /**
    * This is a helper function to compute HDFS block distribution on demand
    * @param conf configuration
-   * @param tableDescriptor HTableDescriptor of the table
+   * @param tableDescriptor TableDescriptor of the table
    * @param regionInfo encoded name of the region
    * @param tablePath the table directory
    * @return The HDFS blocks distribution for the given region.
    * @throws IOException
    */
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
-      final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo,  Path tablePath)
+      final TableDescriptor tableDescriptor, final HRegionInfo regionInfo,  Path tablePath)
       throws IOException {
     HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
     FileSystem fs = tablePath.getFileSystem(conf);
 
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
-    for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
+    for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
       List<LocatedFileStatus> locatedFileStatusList = HRegionFileSystem
           .getStoreFilesLocatedStatus(regionFs, family.getNameAsString(), true);
       if (locatedFileStatusList == null) {
@@ -1338,7 +1340,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         && wasRecovering && !newState) {
 
       // force a flush only if region replication is set up for this region. Otherwise no need.
-      boolean forceFlush = getTableDesc().getRegionReplication() > 1;
+      boolean forceFlush = getTableDescriptor().getRegionReplication() > 1;
 
       MonitoredTask status = TaskMonitor.get().createStatus("Recovering region " + this);
 
@@ -1672,7 +1674,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               .submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
                 @Override
                 public Pair<byte[], Collection<StoreFile>> call() throws IOException {
-                  return new Pair<>(store.getFamily().getName(), store.close());
+                  return new Pair<>(store.getColumnFamilyDescriptor().getName(), store.close());
                 }
               });
         }
@@ -1799,7 +1801,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
   protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(
       final String threadNamePrefix) {
-    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
+    int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount());
     int maxThreads = Math.min(numStores,
         conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
             HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX));
@@ -1808,7 +1810,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool(
       final String threadNamePrefix) {
-    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
+    int numStores = Math.max(1, this.htableDescriptor.getColumnFamilyCount());
     int maxThreads = Math.max(1,
         conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
             HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX)
@@ -1842,10 +1844,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   //////////////////////////////////////////////////////////////////////////////
 
   @Override
-  public HTableDescriptor getTableDesc() {
+  public TableDescriptor getTableDescriptor() {
     return this.htableDescriptor;
   }
 
+  @VisibleForTesting
+  void setTableDescriptor(TableDescriptor desc) {
+    htableDescriptor = desc;
+  }
+
   /** @return WAL in use for this region */
   public WAL getWAL() {
     return this.wal;
@@ -2280,7 +2287,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    */
   boolean shouldFlushStore(Store store) {
     long earliest = this.wal.getEarliestMemstoreSeqNum(getRegionInfo().getEncodedNameAsBytes(),
-      store.getFamily().getName()) - 1;
+      store.getColumnFamilyDescriptor().getName()) - 1;
     if (earliest > 0 && earliest + flushPerChanges < mvcc.getReadPoint()) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Flush column family " + store.getColumnFamilyName() + " of " +
@@ -2461,7 +2468,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     Map<byte[], Long> flushedFamilyNamesToSeq = new HashMap<>();
     for (Store store: storesToFlush) {
-      flushedFamilyNamesToSeq.put(store.getFamily().getName(),
+      flushedFamilyNamesToSeq.put(store.getColumnFamilyDescriptor().getName(),
           ((HStore) store).preFlushSeqIDEstimation());
     }
 
@@ -2501,9 +2508,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       for (Store s : storesToFlush) {
         MemstoreSize flushableSize = s.getSizeToFlush();
         totalSizeOfFlushableStores.incMemstoreSize(flushableSize);
-        storeFlushCtxs.put(s.getFamily().getName(), s.createFlushContext(flushOpSeqId));
-        committedFiles.put(s.getFamily().getName(), null); // for writing stores to WAL
-        storeFlushableSize.put(s.getFamily().getName(), flushableSize);
+        storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(), s.createFlushContext(flushOpSeqId));
+        committedFiles.put(s.getColumnFamilyDescriptor().getName(), null); // for writing stores to WAL
+        storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), flushableSize);
       }
 
       // write the snapshot start to WAL
@@ -2661,7 +2668,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         if (needsCompaction) {
           compactionRequested = true;
         }
-        byte[] storeName = it.next().getFamily().getName();
+        byte[] storeName = it.next().getColumnFamilyDescriptor().getName();
         List<Path> storeCommittedFiles = flush.getCommittedFiles();
         committedFiles.put(storeName, storeCommittedFiles);
         // Flush committed no files, indicating flush is empty or flush was canceled
@@ -2796,7 +2803,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // Verify families are all valid
       if (!scan.hasFamilies()) {
         // Adding all families to scanner
-        for (byte[] family : this.htableDescriptor.getFamiliesKeys()) {
+        for (byte[] family : this.htableDescriptor.getColumnFamilyNames()) {
           scan.addFamily(family);
         }
       } else {
@@ -2831,7 +2838,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public void prepareDelete(Delete delete) throws IOException {
     // Check to see if this is a deleteRow insert
     if(delete.getFamilyCellMap().isEmpty()){
-      for(byte [] family : this.htableDescriptor.getFamiliesKeys()){
+      for(byte [] family : this.htableDescriptor.getColumnFamilyNames()){
         // Don't eat the timestamp
         delete.addFamily(family, delete.getTimeStamp());
       }
@@ -3643,7 +3650,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private void removeNonExistentColumnFamilyForReplay(final Map<byte[], List<Cell>> familyMap) {
     List<byte[]> nonExistentList = null;
     for (byte[] family : familyMap.keySet()) {
-      if (!this.htableDescriptor.hasFamily(family)) {
+      if (!this.htableDescriptor.hasColumnFamily(family)) {
         if (nonExistentList == null) {
           nonExistentList = new ArrayList<>();
         }
@@ -3997,7 +4004,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private void applyToMemstore(final Store store, final List<Cell> cells, final boolean delta,
       MemstoreSize memstoreSize) throws IOException {
     // Any change in how we update Store/MemStore needs to also be done in other applyToMemstore!!!!
-    boolean upsert = delta && store.getFamily().getMaxVersions() == 1;
+    boolean upsert = delta && store.getColumnFamilyDescriptor().getMaxVersions() == 1;
     if (upsert) {
       ((HStore) store).upsert(cells, getSmallestReadPoint(), memstoreSize);
     } else {
@@ -4352,7 +4359,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               continue;
             }
             // Figure which store the edit is meant for.
-            if (store == null || !CellUtil.matchingFamily(cell, store.getFamily().getName())) {
+            if (store == null || !CellUtil.matchingFamily(cell,
+                store.getColumnFamilyDescriptor().getName())) {
               store = getHStore(cell);
             }
             if (store == null) {
@@ -4369,7 +4377,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               continue;
             }
             // Now, figure if we should skip this edit.
-            if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily()
+            if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getColumnFamilyDescriptor()
                 .getName())) {
               skippedEdits++;
               continue;
@@ -5170,12 +5178,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               // only drop memstore snapshots if they are smaller than last flush for the store
               if (this.prepareFlushResult.flushOpSeqId <= storeSeqId) {
                 StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ?
-                    null : this.prepareFlushResult.storeFlushCtxs.get(store.getFamily().getName());
+                    null : this.prepareFlushResult.storeFlushCtxs.get(
+                            store.getColumnFamilyDescriptor().getName());
                 if (ctx != null) {
                   MemstoreSize snapshotSize = store.getSizeToFlush();
                   ctx.abort();
                   this.decrMemstoreSize(snapshotSize);
-                  this.prepareFlushResult.storeFlushCtxs.remove(store.getFamily().getName());
+                  this.prepareFlushResult.storeFlushCtxs.remove(
+                          store.getColumnFamilyDescriptor().getName());
                   totalFreedDataSize += snapshotSize.getDataSize();
                 }
               }
@@ -5280,7 +5290,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return true;
   }
 
-  protected HStore instantiateHStore(final HColumnDescriptor family) throws IOException {
+  protected HStore instantiateHStore(final ColumnFamilyDescriptor family) throws IOException {
     if (family.isMobEnabled()) {
       if (HFile.getFormatVersion(this.conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
         throw new IOException("A minimum HFile version of "
@@ -6500,7 +6510,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @return the new instance
    */
   static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs,
-      Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd,
+      Configuration conf, HRegionInfo regionInfo, final TableDescriptor htd,
       RegionServerServices rsServices) {
     try {
       @SuppressWarnings("unchecked")
@@ -6509,7 +6519,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
       Constructor<? extends HRegion> c =
           regionClass.getConstructor(Path.class, WAL.class, FileSystem.class,
-              Configuration.class, HRegionInfo.class, HTableDescriptor.class,
+              Configuration.class, HRegionInfo.class, TableDescriptor.class,
               RegionServerServices.class);
 
      return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices);
@@ -6530,7 +6540,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-        final Configuration conf, final HTableDescriptor hTableDescriptor,
+        final Configuration conf, final TableDescriptor hTableDescriptor,
         final WAL wal, final boolean initialize)
   throws IOException {
     LOG.info("creating HRegion " + info.getTable().getNameAsString()
@@ -6546,7 +6556,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
                                       final Configuration conf,
-                                      final HTableDescriptor hTableDescriptor,
+                                      final TableDescriptor hTableDescriptor,
                                       final WAL wal)
     throws IOException {
     return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true);
@@ -6565,7 +6575,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(final HRegionInfo info,
-      final HTableDescriptor htd, final WAL wal,
+      final TableDescriptor htd, final WAL wal,
       final Configuration conf)
   throws IOException {
     return openHRegion(info, htd, wal, conf, null, null);
@@ -6587,7 +6597,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(final HRegionInfo info,
-    final HTableDescriptor htd, final WAL wal, final Configuration conf,
+    final TableDescriptor htd, final WAL wal, final Configuration conf,
     final RegionServerServices rsServices,
     final CancelableProgressable reporter)
   throws IOException {
@@ -6608,7 +6618,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(Path rootDir, final HRegionInfo info,
-      final HTableDescriptor htd, final WAL wal, final Configuration conf)
+      final TableDescriptor htd, final WAL wal, final Configuration conf)
   throws IOException {
     return openHRegion(rootDir, info, htd, wal, conf, null, null);
   }
@@ -6629,7 +6639,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(final Path rootDir, final HRegionInfo info,
-      final HTableDescriptor htd, final WAL wal, final Configuration conf,
+      final TableDescriptor htd, final WAL wal, final Configuration conf,
       final RegionServerServices rsServices,
       final CancelableProgressable reporter)
   throws IOException {
@@ -6658,7 +6668,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
-      final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal)
+      final Path rootDir, final HRegionInfo info, final TableDescriptor htd, final WAL wal)
       throws IOException {
     return openHRegion(conf, fs, rootDir, info, htd, wal, null, null);
   }
@@ -6680,7 +6690,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
-      final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal,
+      final Path rootDir, final HRegionInfo info, final TableDescriptor htd, final WAL wal,
       final RegionServerServices rsServices, final CancelableProgressable reporter)
       throws IOException {
     Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
@@ -6704,7 +6714,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException
    */
   public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
-      final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd,
+      final Path rootDir, final Path tableDir, final HRegionInfo info, final TableDescriptor htd,
       final WAL wal, final RegionServerServices rsServices,
       final CancelableProgressable reporter)
       throws IOException {
@@ -6732,7 +6742,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       throws IOException {
     HRegionFileSystem regionFs = other.getRegionFileSystem();
     HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), regionFs.getFileSystem(),
-        other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
+        other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null);
     return r.openHRegion(reporter);
   }
 
@@ -6769,7 +6779,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   public static void warmupHRegion(final HRegionInfo info,
-      final HTableDescriptor htd, final WAL wal, final Configuration conf,
+      final TableDescriptor htd, final WAL wal, final Configuration conf,
       final RegionServerServices rsServices,
       final CancelableProgressable reporter)
       throws IOException {
@@ -6797,14 +6807,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
 
   private void checkCompressionCodecs() throws IOException {
-    for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) {
+    for (ColumnFamilyDescriptor fam: this.htableDescriptor.getColumnFamilies()) {
       CompressionTest.testCompression(fam.getCompressionType());
       CompressionTest.testCompression(fam.getCompactionCompressionType());
     }
   }
 
   private void checkEncryption() throws IOException {
-    for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) {
+    for (ColumnFamilyDescriptor fam: this.htableDescriptor.getColumnFamilies()) {
       EncryptionTest.testEncryption(conf, fam.getEncryptionType(), fam.getEncryptionKey());
     }
   }
@@ -6825,7 +6835,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     // Create the daughter HRegion instance
     HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(),
-        this.getBaseConf(), hri, this.getTableDesc(), rsServices);
+        this.getBaseConf(), hri, this.getTableDescriptor(), rsServices);
     r.readRequestsCount.add(this.getReadRequestsCount() / 2);
     r.filteredReadRequestsCount.add(this.getFilteredReadRequestsCount() / 2);
     r.writeRequestsCount.add(this.getWriteRequestsCount() / 2);
@@ -6842,7 +6852,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       final HRegion region_b) throws IOException {
     HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(),
         fs.getFileSystem(), this.getBaseConf(), mergedRegionInfo,
-        this.getTableDesc(), this.rsServices);
+        this.getTableDescriptor(), this.rsServices);
     r.readRequestsCount.add(this.getReadRequestsCount()
         + region_b.getReadRequestsCount());
     r.filteredReadRequestsCount.add(this.getFilteredReadRequestsCount()
@@ -6949,7 +6959,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         checkFamily(family);
       }
     } else { // Adding all families to scanner
-      for (byte[] family : this.htableDescriptor.getFamiliesKeys()) {
+      for (byte[] family : this.htableDescriptor.getColumnFamilyNames()) {
         get.addFamily(family);
       }
     }
@@ -7472,7 +7482,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       final Mutation mutation, final Durability effectiveDurability, final long now,
       final List<Cell> deltas, final List<Cell> results)
   throws IOException {
-    byte [] columnFamily = store.getFamily().getName();
+    byte [] columnFamily = store.getColumnFamilyDescriptor().getName();
     List<Cell> toApply = new ArrayList<>(deltas.size());
     // Get previous values for all columns in this family.
     List<Cell> currentValues = get(mutation, store, deltas,
@@ -7642,7 +7652,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       get.setIsolationLevel(isolation);
     }
     for (Cell cell: coordinates) {
-      get.addColumn(store.getFamily().getName(), CellUtil.cloneQualifier(cell));
+      get.addColumn(store.getColumnFamilyDescriptor().getName(), CellUtil.cloneQualifier(cell));
     }
     // Increments carry time range. If an Increment instance, put it on the Get.
     if (tr != null) {
@@ -7665,7 +7675,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   void checkFamily(final byte [] family)
   throws NoSuchColumnFamilyException {
-    if (!this.htableDescriptor.hasFamily(family)) {
+    if (!this.htableDescriptor.hasColumnFamily(family)) {
       throw new NoSuchColumnFamilyException("Column family " +
           Bytes.toString(family) + " does not exist in region " + this
           + " in table " + this.htableDescriptor);
@@ -8204,7 +8214,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     buf.append(getRegionInfo().isMetaTable() ? " meta table " : " ");
     buf.append("stores: ");
     for (Store s : getStores()) {
-      buf.append(s.getFamily().getNameAsString());
+      buf.append(s.getColumnFamilyDescriptor().getNameAsString());
       buf.append(" size: ");
       buf.append(s.getSizeOfMemStore().getDataSize());
       buf.append(" ");
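
One behavioral consequence is visible in the hasSloppyStores hunk above: TableDescriptor is immutable, so the old in-place setFlushPolicyClassName(...) call is replaced by copying the descriptor through TableDescriptorBuilder and building a new instance. A sketch of that pattern (the helper is illustrative; newBuilder, setFlushPolicyClassName and build are exactly the calls used in the patch):

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class DescriptorRebuild {
      // Copy an existing descriptor, override one attribute, and build a
      // new immutable instance.
      static TableDescriptor withFlushPolicy(TableDescriptor current, String policyClass) {
        return TableDescriptorBuilder.newBuilder(current)
            .setFlushPolicyClassName(policyClass)
            .build();
      }
    }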

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f0537e0..3593ce6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -3013,7 +3013,7 @@ public class HRegionServer extends HasThread implements
     Set<TableName> tables = new HashSet<>();
     synchronized (this.onlineRegions) {
       for (Region region: this.onlineRegions.values()) {
-        tables.add(region.getTableDesc().getTableName());
+        tables.add(region.getTableDescriptor().getTableName());
       }
     }
     return tables;
@@ -3167,7 +3167,7 @@ public class HRegionServer extends HasThread implements
        if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow;
          throw new IOException(exceptionToThrow);
        }
-       if (regionToClose.getTableDesc().hasSerialReplicationScope()) {
+       if (regionToClose.getTableDescriptor().hasSerialReplicationScope()) {
        // For serial replication, we need add a final barrier on this region. But the splitting
        // or merging may be reverted, so we should make sure if we reopen this region, the open
          // barrier is same as this final barrier
@@ -3185,7 +3185,7 @@ public class HRegionServer extends HasThread implements
          Put finalBarrier = MetaTableAccessor.makeBarrierPut(
            Bytes.toBytes(regionEncodedName.get(i)),
            seq,
-           regionToClose.getTableDesc().getTableName().getName());
+           regionToClose.getTableDescriptor().getTableName().getName());
          MetaTableAccessor.putToMetaTable(getConnection(), finalBarrier);
        }
        // Offline the region

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 03c31af..1639953 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -98,6 +98,7 @@ import com.google.common.collect.ImmutableCollection;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 
 /**
  * A Store holds a column family in a Region.  Its a memstore and a set of zero
@@ -129,7 +130,7 @@ public class HStore implements Store {
   protected final MemStore memstore;
   // This stores directory in the filesystem.
   protected final HRegion region;
-  private final HColumnDescriptor family;
+  private final ColumnFamilyDescriptor family;
   private final HRegionFileSystem fs;
   protected Configuration conf;
   protected CacheConfig cacheConf;
@@ -213,7 +214,7 @@ public class HStore implements Store {
    * failed.  Can be null.
    * @throws IOException
    */
-  protected HStore(final HRegion region, final HColumnDescriptor family,
+  protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
       final Configuration confParam) throws IOException {
 
     this.fs = region.getRegionFileSystem();
@@ -227,7 +228,7 @@ public class HStore implements Store {
     // add global config first, then table and cf overrides, then cf metadata.
     this.conf = new CompoundConfiguration()
       .add(confParam)
-      .addStringMap(region.getTableDesc().getConfiguration())
+      .addStringMap(region.getTableDescriptor().getConfiguration())
       .addStringMap(family.getConfiguration())
       .addBytesMap(family.getValues());
     this.blocksize = family.getBlocksize();
@@ -320,7 +321,7 @@ public class HStore implements Store {
    * Creates the cache config.
    * @param family The current column family.
    */
-  protected void createCacheConf(final HColumnDescriptor family) {
+  protected void createCacheConf(final ColumnFamilyDescriptor family) {
     this.cacheConf = new CacheConfig(conf, family);
   }
 
@@ -341,7 +342,7 @@ public class HStore implements Store {
    * @param family
    * @return TTL in seconds of the specified family
    */
-  public static long determineTTLFromFamily(final HColumnDescriptor family) {
+  public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) {
     // HCD.getTimeToLive returns ttl in seconds.  Convert to milliseconds.
     long ttl = family.getTimeToLive();
     if (ttl == HConstants.FOREVER) {
@@ -455,7 +456,7 @@ public class HStore implements Store {
   }
 
   @Override
-  public HColumnDescriptor getFamily() {
+  public ColumnFamilyDescriptor getColumnFamilyDescriptor() {
     return this.family;
   }
 
@@ -1418,7 +1419,7 @@ public class HStore implements Store {
     }
     HRegionInfo info = this.region.getRegionInfo();
    CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
-        family.getName(), inputPaths, outputPaths, fs.getStoreDir(getFamily().getNameAsString()));
+        family.getName(), inputPaths, outputPaths, fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
     // Fix reaching into Region to get the maxWaitForSeqId.
     // Does this method belong in Region altogether given it is making so many references up there?
     // Could be Region#writeCompactionMarker(compactionDescriptor);
@@ -1736,9 +1737,9 @@ public class HStore implements Store {
 
   private void removeUnneededFiles() throws IOException {
     if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return;
-    if (getFamily().getMinVersions() > 0) {
+    if (getColumnFamilyDescriptor().getMinVersions() > 0) {
      LOG.debug("Skipping expired store file removal due to min version being " +
-          getFamily().getMinVersions());
+          getColumnFamilyDescriptor().getMinVersions());
       return;
     }
     this.lock.readLock().lock();
@@ -2546,7 +2547,7 @@ public class HStore implements Store {
         }
         // Only if this is successful it has to be removed
         try {
-          this.fs.removeStoreFiles(this.getFamily().getNameAsString(), filesToRemove);
+          this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove);
         } catch (FailedArchiveException fae) {
           // Even if archiving some files failed, we still need to clear out any of the
           // files which were successfully archived.  Otherwise we will receive a

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
index 8a310b8..3092e5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
@@ -24,9 +24,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 
 /**
@@ -56,13 +57,13 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS
     if (initialSize > 0) {
       return;
     }
-    HTableDescriptor desc = region.getTableDesc();
+    TableDescriptor desc = region.getTableDescriptor();
     if (desc != null) {
       initialSize = 2 * desc.getMemStoreFlushSize();
     }
     if (initialSize <= 0) {
       initialSize = 2 * conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
-                                     HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+                                     TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
     }
   }
 
@@ -106,7 +107,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS
     if (rss == null) {
       return 0;
     }
-    TableName tablename = region.getTableDesc().getTableName();
+    TableName tablename = region.getTableDescriptor().getTableName();
     int tableRegionsCount = 0;
     try {
       List<Region> hri = rss.getOnlineRegions(tablename);
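
Constants move as well: DEFAULT_MEMSTORE_FLUSH_SIZE is now read from TableDescriptorBuilder instead of HTableDescriptor. A sketch of the fallback computation this split policy performs (the standalone helper and its configuration setup are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class InitialSplitSize {
      static long compute() {
        Configuration conf = HBaseConfiguration.create();
        // Twice the memstore flush size, defaulting to the builder constant.
        return 2 * conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
            TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
      }
    }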

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java
index b987a88..8b93756 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java
@@ -46,14 +46,14 @@ public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSpli
     prefixLength = 0;
 
     // read the prefix length from the table descriptor
-    String prefixLengthString = region.getTableDesc().getValue(
+    String prefixLengthString = region.getTableDescriptor().getValue(
         PREFIX_LENGTH_KEY);
     if (prefixLengthString == null) {
       //read the deprecated value
-      prefixLengthString = region.getTableDesc().getValue(PREFIX_LENGTH_KEY_DEPRECATED);
+      prefixLengthString = region.getTableDescriptor().getValue(PREFIX_LENGTH_KEY_DEPRECATED);
       if (prefixLengthString == null) {
         LOG.error(PREFIX_LENGTH_KEY + " not specified for table "
-            + region.getTableDesc().getTableName()
+            + region.getTableDescriptor().getTableName()
             + ". Using default RegionSplitPolicy");
         return;
       }
@@ -63,13 +63,13 @@ public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSpli
     } catch (NumberFormatException nfe) {
      /* Differentiate NumberFormatException from an invalid value range reported below. */
      LOG.error("Number format exception when parsing " + PREFIX_LENGTH_KEY + " for table"
-          + region.getTableDesc().getTableName() + ":"
+          + region.getTableDescriptor().getTableName() + ":"
           + prefixLengthString + ". " + nfe);
       return;
     }
     if (prefixLength <= 0) {
       LOG.error("Invalid value for " + PREFIX_LENGTH_KEY + " for table "
-          + region.getTableDesc().getTableName() + ":"
+          + region.getTableDescriptor().getTableName() + ":"
           + prefixLengthString + ". Using default RegionSplitPolicy");
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 2f2a4cf..f5d87b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -551,7 +551,7 @@ class MemStoreFlusher implements FlushRequester {
   private boolean isTooManyStoreFiles(Region region) {
 
     // When compaction is disabled, the region is flushable
-    if (!region.getTableDesc().isCompactionEnabled()) {
+    if (!region.getTableDescriptor().isCompactionEnabled()) {
       return false;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index ecee53f..58a0e6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.metrics2.MetricsExecutor;
 
@@ -65,7 +65,7 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
 
   @Override
   public String getTableName() {
-    HTableDescriptor tableDesc = this.region.getTableDesc();
+    TableDescriptor tableDesc = this.region.getTableDescriptor();
     if (tableDesc == null) {
       return UNKNOWN;
     }
@@ -74,7 +74,7 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
 
   @Override
   public String getNamespace() {
-    HTableDescriptor tableDesc = this.region.getTableDesc();
+    TableDescriptor tableDesc = this.region.getTableDescriptor();
     if (tableDesc == null) {
       return UNKNOWN;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java
index c5f0f7b..4ac99d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java
@@ -62,7 +62,7 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
       Map<TableName, MetricsTableValues> localMetricsTableMap = new HashMap<>();
 
       for (Region r : regionServer.getOnlineRegionsLocalContext()) {
-        TableName tbl= r.getTableDesc().getTableName();
+        TableName tbl= r.getTableDescriptor().getTableName();
         MetricsTableValues metricsTable = localMetricsTableMap.get(tbl);
         if (metricsTable == null) {
           metricsTable = new MetricsTableValues();

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 0c69bd9..9499a79 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -110,7 +110,7 @@ public class RSDumpServlet extends StateDumpServlet {
       if (hRegion.getLockedRows().size() > 0) {
         for (HRegion.RowLockContext rowLockContext : hRegion.getLockedRows().values()) {
           sb.setLength(0);
-          sb.append(hRegion.getTableDesc().getTableName()).append(",")
+          sb.append(hRegion.getTableDescriptor().getTableName()).append(",")
             .append(hRegion.getRegionInfo().getEncodedName()).append(",");
           sb.append(rowLockContext.toString());
           out.println(sb.toString());

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 8da34ce..4c0625a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1548,7 +1548,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       if (QuotaUtil.isQuotaEnabled(getConfiguration()) &&
           !Superusers.isSuperUser(RpcServer.getRequestUser()) &&
           this.regionServer.getRegionServerSpaceQuotaManager().areCompactionsDisabled(
-              region.getTableDesc().getTableName())) {
+              region.getTableDescriptor().getTableName())) {
         throw new DoNotRetryIOException("Compactions on this region are "
             + "disabled due to a space quota violation.");
       }
@@ -1784,7 +1784,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       requestCount.increment();
       Set<byte[]> columnFamilies;
       if (request.getFamilyCount() == 0) {
-        columnFamilies = region.getTableDesc().getFamiliesKeys();
+        columnFamilies = region.getTableDescriptor().getColumnFamilyNames();
       } else {
         columnFamilies = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
         for (ByteString cf: request.getFamilyList()) {
@@ -2890,7 +2890,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     if (!scan.hasFamilies()) {
       // Adding all families to scanner
-      for (byte[] family : region.getTableDesc().getFamiliesKeys()) {
+      for (byte[] family : region.getTableDescriptor().getColumnFamilyNames()) {
         scan.addFamily(family);
       }
     }
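
Alongside the getTableDesc() rename, this file picks up the collection rename: getFamiliesKeys() on the old descriptor becomes getColumnFamilyNames() on TableDescriptor. A hedged caller-side sketch of the scan-population pattern above (ScanFamilyExample and addAllFamilies are hypothetical names, not part of the patch):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class ScanFamilyExample {
      // Adds every column family in the table to the scan, as RSRpcServices
      // does when the request names no families.
      static void addAllFamilies(Scan scan, TableDescriptor desc) {
        for (byte[] family : desc.getColumnFamilyNames()) {
          scan.addFamily(family);
        }
      }
    }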

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 63e18c3..4c188fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
@@ -79,7 +80,7 @@ public interface Region extends ConfigurationObserver {
   HRegionInfo getRegionInfo();
 
   /** @return table descriptor for this region */
-  HTableDescriptor getTableDesc();
+  TableDescriptor getTableDescriptor();
 
   /** @return true if region is available (not closed and not closing) */
   boolean isAvailable();
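
This is the interface change the rest of the patch fans out from: Region.getTableDesc() becomes Region.getTableDescriptor() and returns the client-side TableDescriptor. For downstream code the migration is a mechanical rename; a minimal caller sketch, assuming only the API in this diff (RegionDescriptorExample and tableNameOf are hypothetical names, not part of the patch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.regionserver.Region;

    final class RegionDescriptorExample {
      // was: region.getTableDesc().getTableName()
      static TableName tableNameOf(Region region) {
        TableDescriptor desc = region.getTableDescriptor();
        return (desc == null) ? null : desc.getTableName();
      }
    }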

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 0abc988..4570cec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
@@ -260,7 +261,7 @@ public class RegionCoprocessorHost
   }
 
   static List<TableCoprocessorAttribute> getTableCoprocessorAttrsFromSchema(Configuration conf,
-      HTableDescriptor htd) {
+      TableDescriptor htd) {
     List<TableCoprocessorAttribute> result = Lists.newArrayList();
     for (Map.Entry<Bytes, Bytes> e: htd.getValues().entrySet()) {
       String key = Bytes.toString(e.getKey().get()).trim();
@@ -324,7 +325,7 @@ public class RegionCoprocessorHost
    * @throws IOException
    */
   public static void testTableCoprocessorAttrs(final Configuration conf,
-      final HTableDescriptor htd) throws IOException {
+      final TableDescriptor htd) throws IOException {
     String pathPrefix = UUID.randomUUID().toString();
     for (TableCoprocessorAttribute attr: getTableCoprocessorAttrsFromSchema(conf, htd)) {
       if (attr.getPriority() < 0) {
@@ -362,15 +363,15 @@ public class RegionCoprocessorHost
     // scan the table attributes for coprocessor load specifications
     // initialize the coprocessors
     List<RegionEnvironment> configured = new ArrayList<>();
-    for (TableCoprocessorAttribute attr: getTableCoprocessorAttrsFromSchema(conf, 
-        region.getTableDesc())) {
+    for (TableCoprocessorAttribute attr: getTableCoprocessorAttrsFromSchema(conf,
+        region.getTableDescriptor())) {
       // Load encompasses classloading and coprocessor initialization
       try {
         RegionEnvironment env = load(attr.getPath(), attr.getClassName(), attr.getPriority(),
           attr.getConf());
         configured.add(env);
         LOG.info("Loaded coprocessor " + attr.getClassName() + " from HTD of " 
+
-            region.getTableDesc().getTableName().getNameAsString() + " 
successfully.");
+            region.getTableDescriptor().getTableName().getNameAsString() + " 
successfully.");
       } catch (Throwable t) {
         // Coprocessor failed to load, do we abort on error?
         if (conf.getBoolean(ABORT_ON_ERROR_KEY, DEFAULT_ABORT_ON_ERROR)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index 8cdfd3b..f980065 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -82,7 +82,7 @@ public class RegionServicesForStores {
   }
 
   public int getNumStores() {
-    return region.getTableDesc().getColumnFamilyCount();
+    return region.getTableDescriptor().getColumnFamilyCount();
   }
 
   // methods for tests

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index e20b3e2..c6ee946 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -23,13 +23,14 @@ import java.util.List;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.base.Preconditions;
 
+
 /**
  * A split policy determines when a region should be split.
  * @see IncreasingToUpperBoundRegionSplitPolicy Default split policy since
@@ -101,14 +102,14 @@ public abstract class RegionSplitPolicy extends Configured {
   public static RegionSplitPolicy create(HRegion region,
       Configuration conf) throws IOException {
     Class<? extends RegionSplitPolicy> clazz = getSplitPolicyClass(
-        region.getTableDesc(), conf);
+        region.getTableDescriptor(), conf);
     RegionSplitPolicy policy = ReflectionUtils.newInstance(clazz, conf);
     policy.configureForRegion(region);
     return policy;
   }
 
   public static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
-      HTableDescriptor htd, Configuration conf) throws IOException {
+      TableDescriptor htd, Configuration conf) throws IOException {
     String className = htd.getRegionSplitPolicyClassName();
     if (className == null) {
       className = conf.get(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
index 2a66e55..6be129f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -57,13 +57,13 @@ public class ScanInfo {
 
   /**
    * @param conf
-   * @param family {@link HColumnDescriptor} describing the column family
+   * @param family {@link ColumnFamilyDescriptor} describing the column family
    * @param ttl Store's TTL (in ms)
    * @param timeToPurgeDeletes duration in ms after which a delete marker can be purged during a
    *          major compaction.
    * @param comparator The store's comparator
    */
-  public ScanInfo(final Configuration conf, final HColumnDescriptor family, final long ttl,
+  public ScanInfo(final Configuration conf, final ColumnFamilyDescriptor family, final long ttl,
       final long timeToPurgeDeletes, final CellComparator comparator) {
     this(conf, family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl,
         family.getKeepDeletedCells(), family.getBlocksize(), timeToPurgeDeletes, comparator);
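
Callers of this constructor now pass a ColumnFamilyDescriptor instead of an HColumnDescriptor. A minimal construction sketch, assuming ColumnFamilyDescriptorBuilder from the same client package and the CellComparator.COMPARATOR singleton available at this revision (the family name "cf" and the ttl/timeToPurgeDeletes values are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.CellComparator;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.ScanInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ScanInfoExample {
      static ScanInfo newScanInfo(Configuration conf) {
        ColumnFamilyDescriptor family =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build();
        // ttl and timeToPurgeDeletes are in ms, per the javadoc above
        return new ScanInfo(conf, family, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
      }
    }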

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index b1473cb..5fde576 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -150,7 +150,7 @@ public class SecureBulkLoadManager {
     }
 
     String bulkToken =
-        createStagingDir(baseStagingDir, getActiveUser(), region.getTableDesc().getTableName())
+        createStagingDir(baseStagingDir, getActiveUser(), region.getTableDescriptor().getTableName())
             .toString();
 
     return bulkToken;

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 766b562..e2fabae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -25,11 +25,11 @@ import java.util.NavigableSet;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -361,7 +361,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    */
   MemstoreSize getSizeOfSnapshot();
 
-  HColumnDescriptor getFamily();
+  ColumnFamilyDescriptor getColumnFamilyDescriptor();
 
   /**
    * @return The maximum sequence id in all store files.
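
The matching rename on the Store interface: getFamily() becomes getColumnFamilyDescriptor() and returns the client-side ColumnFamilyDescriptor. A minimal caller sketch assuming only this diff (StoreDescriptorExample and familyNameOf are hypothetical names, not part of the patch):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.regionserver.Store;

    final class StoreDescriptorExample {
      // was: store.getFamily().getNameAsString()
      static String familyNameOf(Store store) {
        ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor();
        return cfd.getNameAsString();
      }
    }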

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 13a5f01..603ff0a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -472,7 +472,7 @@ public class StoreFileScanner implements KeyValueScanner {
   @Override
   public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
     // if the file has no entries, no need to validate or create a scanner.
-    byte[] cf = store.getFamily().getName();
+    byte[] cf = store.getColumnFamilyDescriptor().getName();
     TimeRange timeRange = scan.getColumnFamilyTimeRange().get(cf);
     if (timeRange == null) {
       timeRange = scan.getTimeRange();

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 3f9688d..aa7024b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -110,7 +110,7 @@ public class StripeStoreFlusher extends StoreFlusher {
       @Override
       public StoreFileWriter createWriter() throws IOException {
         StoreFileWriter writer = store.createWriterInTmp(
-            kvCount, store.getFamily().getCompressionType(),
+            kvCount, store.getColumnFamilyDescriptor().getCompressionType(),
             /* isCompaction = */ false,
             /* includeMVCCReadpoint = */ true,
             /* includesTags = */ true,

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 463ed86..de45240 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -93,9 +93,9 @@ public abstract class Compactor<T extends CellSink> {
     this.store = store;
     this.compactionKVMax =
       this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
-    this.compactionCompression = (this.store.getFamily() == null) ?
-        Compression.Algorithm.NONE : this.store.getFamily().getCompactionCompressionType();
-    this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, 
+    this.compactionCompression = (this.store.getColumnFamilyDescriptor() == null) ?
+        Compression.Algorithm.NONE : this.store.getColumnFamilyDescriptor().getCompactionCompressionType();
+    this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD,
       HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD);
     this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true);
     this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true);
@@ -397,7 +397,7 @@ public abstract class Compactor<T extends CellSink> {
 
     throughputController.start(compactionName);
     KeyValueScanner kvs = (scanner instanceof KeyValueScanner)? (KeyValueScanner)scanner : null;
-    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getFamily().getBlocksize();
+    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize();
     try {
       do {
         hasMore = scanner.next(cells, scannerContext);
@@ -499,7 +499,7 @@ public abstract class Compactor<T extends CellSink> {
   protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
       ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
     Scan scan = new Scan();
-    scan.setMaxVersions(store.getFamily().getMaxVersions());
+    scan.setMaxVersions(store.getColumnFamilyDescriptor().getMaxVersions());
     return new StoreScanner(store, store.getScanInfo(), scan, scanners,
         scanType, smallestReadPoint, earliestPutTs);
   }
@@ -517,7 +517,7 @@ public abstract class Compactor<T extends CellSink> {
      long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow,
      byte[] dropDeletesToRow) throws IOException {
     Scan scan = new Scan();
-    scan.setMaxVersions(store.getFamily().getMaxVersions());
+    scan.setMaxVersions(store.getColumnFamilyDescriptor().getMaxVersions());
     return new StoreScanner(store, store.getScanInfo(), scan, scanners, smallestReadPoint,
         earliestPutTs, dropDeletesFromRow, dropDeletesToRow);
   }
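
The constructor hunk above carries its defensive null check across the rename: compaction compression falls back to NONE when the store has no family descriptor. A hedged sketch of that fallback in isolation, assuming the ColumnFamilyDescriptor and Compression APIs shown in this commit (CompactionCompressionExample and compressionFor are hypothetical names, not part of the patch):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.io.compress.Compression;

    final class CompactionCompressionExample {
      // Falls back to NONE when no family descriptor is available,
      // mirroring Compactor's constructor above.
      static Compression.Algorithm compressionFor(ColumnFamilyDescriptor cfd) {
        return (cfd == null)
            ? Compression.Algorithm.NONE
            : cfd.getCompactionCompressionType();
      }
    }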

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
index b3c4147..7216c7b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
@@ -50,6 +50,7 @@ public final class ThroughputControlUtil {
       }
     }
     return store.getRegionInfo().getRegionNameAsString() + NAME_DELIMITER
-        + store.getFamily().getNameAsString() + NAME_DELIMITER + opName + NAME_DELIMITER + counter;
+        + store.getColumnFamilyDescriptor().getNameAsString()
+        + NAME_DELIMITER + opName + NAME_DELIMITER + counter;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index b719eba..1eab848 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -359,7 +359,7 @@ public class AccessControlLists {
    * metadata table.
    */
   static boolean isAclRegion(Region region) {
-    return ACL_TABLE_NAME.equals(region.getTableDesc().getTableName());
+    return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/53ec9c5b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index aa0c094..e393804 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -134,6 +134,7 @@ import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 
 /**
  * Provides basic authorization checks for data access and administrative
@@ -954,7 +955,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
     } else if (env instanceof RegionCoprocessorEnvironment) {
       // if running at region
       regionEnv = (RegionCoprocessorEnvironment) env;
-      conf.addStringMap(regionEnv.getRegion().getTableDesc().getConfiguration());
+      conf.addStringMap(regionEnv.getRegion().getTableDescriptor().getConfiguration());
       zk = regionEnv.getRegionServerServices().getZooKeeper();
       compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
         AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
@@ -1551,7 +1552,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
     Region region = getRegion(env);
     TableName table = getTableName(region);
     Map<ByteRange, Integer> cfVsMaxVersions = Maps.newHashMap();
-    for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
+    for (ColumnFamilyDescriptor hcd : region.getTableDescriptor().getColumnFamilies()) {
       cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions());
     }
     if (!authResult.isAllowed()) {
@@ -2155,7 +2156,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
     User user = getActiveUser(ctx);
     for(Pair<byte[],String> el : familyPaths) {
       requirePermission(user, "preBulkLoadHFile",
-          ctx.getEnvironment().getRegion().getTableDesc().getTableName(),
+          ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(),
           el.getFirst(),
           null,
           Action.CREATE);
@@ -2173,7 +2174,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
   public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
       PrepareBulkLoadRequest request) throws IOException {
     requireAccess(getActiveUser(ctx), "prePrepareBulkLoad",
-        ctx.getEnvironment().getRegion().getTableDesc().getTableName(), Action.CREATE);
+        ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.CREATE);
   }
 
   /**
@@ -2187,7 +2188,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
   public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
       CleanupBulkLoadRequest request) throws IOException {
     requireAccess(getActiveUser(ctx), "preCleanupBulkLoad",
-        ctx.getEnvironment().getRegion().getTableDesc().getTableName(), Action.CREATE);
+        ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.CREATE);
   }
 
   /* ---- EndpointObserver implementation ---- */
@@ -2392,7 +2393,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
     AccessControlProtos.CheckPermissionsResponse response = null;
     try {
       User user = RpcServer.getRequestUser();
-      TableName tableName = regionEnv.getRegion().getTableDesc().getTableName();
+      TableName tableName = regionEnv.getRegion().getTableDescriptor().getTableName();
       for (Permission permission : permissions) {
         if (permission instanceof TablePermission) {
           // Check table permissions
@@ -2586,7 +2587,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
   @Override
   public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
       Region regionB) throws IOException {
-    requirePermission(getActiveUser(ctx), "mergeRegions", regionA.getTableDesc().getTableName(),
+    requirePermission(getActiveUser(ctx), "mergeRegions", regionA.getTableDescriptor().getTableName(),
         null, null, Action.ADMIN);
   }
 
