HBASE-11995 Use Connection and ConnectionFactory where possible (Solomon Duskis)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3beb168b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3beb168b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3beb168b

Branch: refs/heads/master
Commit: 3beb168b4fbf7628b40f0cf6030674338e9ea61b
Parents: 88cd708
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Sep 26 11:49:37 2014 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Sep 26 11:49:37 2014 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 230 +++++++++----------
 .../hadoop/hbase/client/ConnectionAdapter.java  |   2 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   2 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |   4 +-
 .../apache/hadoop/hbase/client/Registry.java    |   2 +-
 .../hadoop/hbase/client/ZooKeeperRegistry.java  |   2 +-
 .../hbase/client/TestClientNoCluster.java       |   8 +-
 .../SnapshotOfRegionAssignmentFromMeta.java     |  14 +-
 .../balancer/FavoredNodeAssignmentHelper.java   |   8 +-
 .../master/snapshot/RestoreSnapshotHandler.java |   4 +-
 .../regionserver/ReplicationSink.java           |   8 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |   9 +-
 .../hadoop/hbase/util/MultiHConnection.java     |   3 +-
 .../hadoop/hbase/HBaseTestingUtility.java       |   5 +-
 .../hadoop/hbase/PerformanceEvaluation.java     |  42 ++--
 .../hadoop/hbase/TestMetaTableAccessor.java     |  58 ++---
 .../hadoop/hbase/client/TestClientTimeouts.java |   4 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   2 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |  42 ++--
 .../TestMasterOperationsForRegionReplicas.java  |  15 +-
 .../TestRegionReplicaReplicationEndpoint.java   |   6 +-
 ...egionReplicaReplicationEndpointNoMaster.java |   8 +-
 .../security/access/TestAccessController.java   |   6 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |   4 +-
 .../hadoop/hbase/util/TestMergeTable.java       |   9 +-
 25 files changed, 250 insertions(+), 247 deletions(-)
----------------------------------------------------------------------
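
For reference, a minimal caller-side sketch (not part of this patch) of the pattern these files move to: ConnectionFactory and Connection in place of HConnectionManager and HConnection. The class name, the table name "example", and the row key are hypothetical; only the API shapes are taken from the diff below.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConnectionFactoryExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Connection and Table are Closeable; the caller owns both.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example"))) {
          Result r = table.get(new Get(Bytes.toBytes("row1")));
          System.out.println("cells returned: " + r.size());
        }
      }
    }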


http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 1a9d0a0..5c01d93 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -24,9 +24,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -133,65 +133,65 @@ public class MetaTableAccessor {
    * @return List of {@link org.apache.hadoop.hbase.client.Result}
    * @throws IOException
    */
-  public static List<Result> fullScanOfMeta(HConnection hConnection)
+  public static List<Result> fullScanOfMeta(Connection connection)
   throws IOException {
     CollectAllVisitor v = new CollectAllVisitor();
-    fullScan(hConnection, v, null);
+    fullScan(connection, v, null);
     return v.getResults();
   }
 
   /**
    * Performs a full scan of <code>hbase:meta</code>.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param visitor Visitor invoked against each row.
    * @throws IOException
    */
-  public static void fullScan(HConnection hConnection,
+  public static void fullScan(Connection connection,
       final Visitor visitor)
   throws IOException {
-    fullScan(hConnection, visitor, null);
+    fullScan(connection, visitor, null);
   }
 
   /**
    * Performs a full scan of <code>hbase:meta</code>.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @return List of {@link Result}
    * @throws IOException
    */
-  public static List<Result> fullScan(HConnection hConnection)
+  public static List<Result> fullScan(Connection connection)
     throws IOException {
     CollectAllVisitor v = new CollectAllVisitor();
-    fullScan(hConnection, v, null);
+    fullScan(connection, v, null);
     return v.getResults();
   }
 
   /**
    * Callers should call close on the returned {@link HTable} instance.
-   * @param hConnection connection we're using to access table
+   * @param connection connection we're using to access table
   * @param tableName Table to get an {@link org.apache.hadoop.hbase.client.HTable} against.
   * @return An {@link org.apache.hadoop.hbase.client.HTable} for <code>tableName</code>
    * @throws IOException
    * @SuppressWarnings("deprecation")
    */
-  private static Table getHTable(final HConnection hConnection,
+  private static Table getHTable(final Connection connection,
       final TableName tableName)
   throws IOException {
-    // We used to pass whole CatalogTracker in here, now we just pass in HConnection
-    if (hConnection == null || hConnection.isClosed()) {
+    // We used to pass whole CatalogTracker in here, now we just pass in Connection
+    if (connection == null || connection.isClosed()) {
       throw new NullPointerException("No connection");
     }
-    return new HTable(tableName, hConnection);
+    return new HTable(tableName, connection);
   }
 
   /**
    * Callers should call close on the returned {@link HTable} instance.
-   * @param hConnection connection we're using to access Meta
+   * @param connection connection we're using to access Meta
    * @return An {@link HTable} for <code>hbase:meta</code>
    * @throws IOException
    */
-  static Table getMetaHTable(final HConnection hConnection)
+  static Table getMetaHTable(final Connection connection)
   throws IOException {
-    return getHTable(hConnection, TableName.META_TABLE_NAME);
+    return getHTable(connection, TableName.META_TABLE_NAME);
   }
 
   /**
@@ -209,17 +209,17 @@ public class MetaTableAccessor {
 
   /**
    * Gets the region info and assignment for the specified region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionName Region to lookup.
    * @return Location and HRegionInfo for <code>regionName</code>
    * @throws IOException
-   * @deprecated use {@link #getRegionLocation(HConnection, byte[])} instead
+   * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
    */
   @Deprecated
   public static Pair<HRegionInfo, ServerName> getRegion(
-    HConnection hConnection, byte [] regionName)
+    Connection connection, byte [] regionName)
     throws IOException {
-    HRegionLocation location = getRegionLocation(hConnection, regionName);
+    HRegionLocation location = getRegionLocation(connection, regionName);
     return location == null
       ? null
      : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
@@ -227,12 +227,12 @@ public class MetaTableAccessor {
 
   /**
    * Returns the HRegionLocation from meta for the given region
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionName region we're looking for
    * @return HRegionLocation for the given region
    * @throws IOException
    */
-  public static HRegionLocation getRegionLocation(HConnection hConnection,
+  public static HRegionLocation getRegionLocation(Connection connection,
                                                   byte[] regionName) throws IOException {
     byte[] row = regionName;
     HRegionInfo parsedInfo = null;
@@ -244,7 +244,7 @@ public class MetaTableAccessor {
     }
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(hConnection), get);
+    Result r = get(getMetaHTable(connection), get);
     RegionLocations locations = getRegionLocations(r);
     return locations == null
       ? null
@@ -253,17 +253,17 @@ public class MetaTableAccessor {
 
   /**
    * Returns the HRegionLocation from meta for the given region
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region information
    * @return HRegionLocation for the given region
    * @throws IOException
    */
-  public static HRegionLocation getRegionLocation(HConnection hConnection,
+  public static HRegionLocation getRegionLocation(Connection connection,
                                                   HRegionInfo regionInfo) throws IOException {
     byte[] row = getMetaKeyForRegion(regionInfo);
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(hConnection), get);
+    Result r = get(getMetaHTable(connection), get);
     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
   }
 
@@ -287,16 +287,16 @@ public class MetaTableAccessor {
 
   /**
    * Gets the result in hbase:meta for the specified region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionName region we're looking for
    * @return result of the specified region
    * @throws IOException
    */
-  public static Result getRegionResult(HConnection hConnection,
+  public static Result getRegionResult(Connection connection,
       byte[] regionName) throws IOException {
     Get get = new Get(regionName);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    return get(getMetaHTable(hConnection), get);
+    return get(getMetaHTable(connection), get);
   }
 
   /**
@@ -305,8 +305,8 @@ public class MetaTableAccessor {
    * @throws IOException
    */
   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-      HConnection hConnection, byte[] regionName) throws IOException {
-    Result result = getRegionResult(hConnection, regionName);
+      Connection connection, byte[] regionName) throws IOException {
+    Result result = getRegionResult(connection, regionName);
     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
     if (mergeA == null && mergeB == null) {
@@ -318,12 +318,12 @@ public class MetaTableAccessor {
   /**
   * Checks if the specified table exists.  Looks at the hbase:meta table hosted on
    * the specified server.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table to check
    * @return true if the table exists in meta, false if not
    * @throws IOException
    */
-  public static boolean tableExists(HConnection hConnection,
+  public static boolean tableExists(Connection connection,
       final TableName tableName)
   throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
@@ -359,7 +359,7 @@ public class MetaTableAccessor {
         this.results.add(this.current);
       }
     };
-    fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+    fullScan(connection, visitor, getTableStartRowForMeta(tableName));
     // If visitor has results >= 1 then table exists.
     return visitor.getResults().size() >= 1;
   }
@@ -367,32 +367,32 @@ public class MetaTableAccessor {
   /**
    * Gets all of the regions of the specified table. Do not use this method
    * to get meta table regions, use methods in MetaTableLocator instead.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table we're looking for
    * @return Ordered list of {@link HRegionInfo}.
    * @throws IOException
    */
-  public static List<HRegionInfo> getTableRegions(HConnection hConnection, TableName tableName)
+  public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
   throws IOException {
-    return getTableRegions(hConnection, tableName, false);
+    return getTableRegions(connection, tableName, false);
   }
 
   /**
    * Gets all of the regions of the specified table. Do not use this method
    * to get meta table regions, use methods in MetaTableLocator instead.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table we're looking for
    * @param excludeOfflinedSplitParents If true, do not include offlined split
    * parents in the return.
    * @return Ordered list of {@link HRegionInfo}.
    * @throws IOException
    */
-  public static List<HRegionInfo> getTableRegions(HConnection hConnection,
+  public static List<HRegionInfo> getTableRegions(Connection connection,
       TableName tableName, final boolean excludeOfflinedSplitParents)
       throws IOException {
     List<Pair<HRegionInfo, ServerName>> result;
     try {
-      result = getTableRegionsAndLocations(hConnection, tableName,
+      result = getTableRegionsAndLocations(connection, tableName,
         excludeOfflinedSplitParents);
     } catch (InterruptedException e) {
       throw (InterruptedIOException)new InterruptedIOException().initCause(e);
@@ -455,28 +455,28 @@ public class MetaTableAccessor {
 
   /**
   * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table we're looking for
    * @return Return list of regioninfos and server.
    * @throws IOException
    * @throws InterruptedException
    */
   public static List<Pair<HRegionInfo, ServerName>>
-    getTableRegionsAndLocations(HConnection hConnection, TableName tableName)
+    getTableRegionsAndLocations(Connection connection, TableName tableName)
       throws IOException, InterruptedException {
-    return getTableRegionsAndLocations(hConnection, tableName, true);
+    return getTableRegionsAndLocations(connection, tableName, true);
   }
 
   /**
   * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table to work with
    * @return Return list of regioninfos and server addresses.
    * @throws IOException
    * @throws InterruptedException
    */
  public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
-        HConnection hConnection, final TableName tableName,
+        Connection connection, final TableName tableName,
      final boolean excludeOfflinedSplitParents) throws IOException, InterruptedException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
       throw new IOException("This method can't be used to locate meta regions;"
@@ -514,19 +514,19 @@ public class MetaTableAccessor {
           }
         }
       };
-    fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+    fullScan(connection, visitor, getTableStartRowForMeta(tableName));
     return visitor.getResults();
   }
 
   /**
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param serverName server whose regions we're interested in
    * @return List of user regions installed on this server (does not include
    * catalog regions).
    * @throws IOException
    */
   public static NavigableMap<HRegionInfo, Result>
-  getServerUserRegions(HConnection hConnection, final ServerName serverName)
+  getServerUserRegions(Connection connection, final ServerName serverName)
     throws IOException {
    final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
    // Fill the above hris map with entries from hbase:meta that have the passed
@@ -546,11 +546,11 @@ public class MetaTableAccessor {
         }
       }
     };
-    fullScan(hConnection, v);
+    fullScan(connection, v);
     return hris;
   }
 
-  public static void fullScanMetaAndPrint(HConnection hConnection)
+  public static void fullScanMetaAndPrint(Connection connection)
     throws IOException {
     Visitor v = new Visitor() {
       @Override
@@ -567,30 +567,30 @@ public class MetaTableAccessor {
         return true;
       }
     };
-    fullScan(hConnection, v);
+    fullScan(connection, v);
   }
 
   /**
    * Performs a full scan of a catalog table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param visitor Visitor invoked against each row.
    * @param startrow Where to start the scan. Pass null if want to begin scan
    * at first row.
    * <code>hbase:meta</code>, the default (pass false to scan hbase:meta)
    * @throws IOException
    */
-  public static void fullScan(HConnection hConnection,
+  public static void fullScan(Connection connection,
     final Visitor visitor, final byte [] startrow)
   throws IOException {
     Scan scan = new Scan();
     if (startrow != null) scan.setStartRow(startrow);
     if (startrow == null) {
-      int caching = hConnection.getConfiguration()
+      int caching = connection.getConfiguration()
           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
       scan.setCaching(caching);
     }
     scan.addFamily(HConstants.CATALOG_FAMILY);
-    Table metaTable = getMetaHTable(hConnection);
+    Table metaTable = getMetaHTable(connection);
     ResultScanner scanner = null;
     try {
       scanner = metaTable.getScanner(scan);
@@ -925,13 +925,13 @@ public class MetaTableAccessor {
 
   /**
    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param p Put to add to hbase:meta
    * @throws IOException
    */
-  static void putToMetaTable(final HConnection hConnection, final Put p)
+  static void putToMetaTable(final Connection connection, final Put p)
     throws IOException {
-    put(getMetaHTable(hConnection), p);
+    put(getMetaHTable(connection), p);
   }
 
   /**
@@ -949,13 +949,13 @@ public class MetaTableAccessor {
 
   /**
    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param ps Put to add to hbase:meta
    * @throws IOException
    */
-  public static void putsToMetaTable(final HConnection hConnection, final List<Put> ps)
+  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
     throws IOException {
-    Table t = getMetaHTable(hConnection);
+    Table t = getMetaHTable(connection);
     try {
       t.put(ps);
     } finally {
@@ -965,26 +965,26 @@ public class MetaTableAccessor {
 
   /**
    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param d Delete to add to hbase:meta
    * @throws IOException
    */
-  static void deleteFromMetaTable(final HConnection hConnection, final Delete d)
+  static void deleteFromMetaTable(final Connection connection, final Delete d)
     throws IOException {
     List<Delete> dels = new ArrayList<Delete>(1);
     dels.add(d);
-    deleteFromMetaTable(hConnection, dels);
+    deleteFromMetaTable(connection, dels);
   }
 
   /**
   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
    * @throws IOException
    */
-  public static void deleteFromMetaTable(final HConnection hConnection, final List<Delete> deletes)
+  public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
     throws IOException {
-    Table t = getMetaHTable(hConnection);
+    Table t = getMetaHTable(connection);
     try {
       t.delete(deletes);
     } finally {
@@ -997,11 +997,11 @@ public class MetaTableAccessor {
    * @param metaRows rows in hbase:meta
   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
    * @param numReplicasToRemove how many replicas to remove
-   * @param hConnection connection we're using to access meta table
+   * @param connection connection we're using to access meta table
    * @throws IOException
    */
   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-    int replicaIndexToDeleteFrom, int numReplicasToRemove, HConnection hConnection)
+    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
       throws IOException {
     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
     for (byte[] row : metaRows) {
@@ -1014,20 +1014,20 @@ public class MetaTableAccessor {
         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
           getStartCodeColumn(i));
       }
-      deleteFromMetaTable(hConnection, deleteReplicaLocations);
+      deleteFromMetaTable(connection, deleteReplicaLocations);
     }
   }
 
   /**
   * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param mutations Puts and Deletes to execute on hbase:meta
    * @throws IOException
    */
-  public static void mutateMetaTable(final HConnection hConnection,
+  public static void mutateMetaTable(final Connection connection,
                                      final List<Mutation> mutations)
     throws IOException {
-    Table t = getMetaHTable(hConnection);
+    Table t = getMetaHTable(connection);
     try {
       t.batch(mutations);
     } catch (InterruptedException e) {
@@ -1041,14 +1041,14 @@ public class MetaTableAccessor {
 
   /**
    * Adds a hbase:meta row for the specified new region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region information
    * @throws IOException if problem connecting or updating meta
    */
-  public static void addRegionToMeta(HConnection hConnection,
+  public static void addRegionToMeta(Connection connection,
                                      HRegionInfo regionInfo)
     throws IOException {
-    putToMetaTable(hConnection, makePutFromRegionInfo(regionInfo));
+    putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
     LOG.info("Added " + regionInfo.getRegionNameAsString());
   }
 
@@ -1067,7 +1067,7 @@ public class MetaTableAccessor {
   * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
   * does not add its daughter's as different rows, but adds information about the daughters
    * in the same row as the parent. Use
-   * {@link #splitRegion(org.apache.hadoop.hbase.client.HConnection,
+   * {@link #splitRegion(org.apache.hadoop.hbase.client.Connection,
    *   HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
    * if you want to do that.
    * @param meta the HTable for META
@@ -1090,17 +1090,17 @@ public class MetaTableAccessor {
   * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
   * does not add its daughter's as different rows, but adds information about the daughters
   * in the same row as the parent. Use
-   * {@link #splitRegion(HConnection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
+   * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
    * if you want to do that.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region information
    * @param splitA first split daughter of the parent regionInfo
    * @param splitB second split daughter of the parent regionInfo
    * @throws IOException if problem connecting or updating meta
    */
-  public static void addRegionToMeta(HConnection hConnection, HRegionInfo regionInfo,
+  public static void addRegionToMeta(Connection connection, HRegionInfo regionInfo,
                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
-    Table meta = getMetaHTable(hConnection);
+    Table meta = getMetaHTable(connection);
     try {
       addRegionToMeta(meta, regionInfo, splitA, splitB);
     } finally {
@@ -1110,11 +1110,11 @@ public class MetaTableAccessor {
 
   /**
    * Adds a hbase:meta row for each of the specified new regions.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfos region information list
    * @throws IOException if problem connecting or updating meta
    */
-  public static void addRegionsToMeta(HConnection hConnection,
+  public static void addRegionsToMeta(Connection connection,
                                       List<HRegionInfo> regionInfos)
     throws IOException {
     List<Put> puts = new ArrayList<Put>();
@@ -1123,7 +1123,7 @@ public class MetaTableAccessor {
         puts.add(makePutFromRegionInfo(regionInfo));
       }
     }
-    putsToMetaTable(hConnection, puts);
+    putsToMetaTable(connection, puts);
     LOG.info("Added " + puts.size());
   }
 
@@ -1133,7 +1133,7 @@ public class MetaTableAccessor {
    * @param sn the location of the region
   * @param openSeqNum the latest sequence number obtained when the region was open
    */
-  public static void addDaughter(final HConnection hConnection,
+  public static void addDaughter(final Connection connection,
       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
       throws NotAllMetaRegionsOnlineException, IOException {
     Put put = new Put(regionInfo.getRegionName());
@@ -1141,7 +1141,7 @@ public class MetaTableAccessor {
     if (sn != null) {
       addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
     }
-    putToMetaTable(hConnection, put);
+    putToMetaTable(connection, put);
     LOG.info("Added daughter " + regionInfo.getEncodedName() +
       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
   }
@@ -1150,16 +1150,16 @@ public class MetaTableAccessor {
    * Merge the two regions into one in an atomic operation. Deletes the two
   * merging regions in hbase:meta and adds the merged region with the information of
    * two merging regions.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param mergedRegion the merged region
    * @param regionA
    * @param regionB
    * @param sn the location of the region
    * @throws IOException
    */
-  public static void mergeRegions(final HConnection hConnection, HRegionInfo mergedRegion,
+  public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
      HRegionInfo regionA, HRegionInfo regionB, ServerName sn) throws IOException {
-    Table meta = getMetaHTable(hConnection);
+    Table meta = getMetaHTable(connection);
     try {
       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
 
@@ -1190,16 +1190,16 @@ public class MetaTableAccessor {
    * region with the information that it is split into two, and also adds
   * the daughter regions. Does not add the location information to the daughter
    * regions since they are not open yet.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param parent the parent region which is split
    * @param splitA Split daughter region A
    * @param splitB Split daughter region A
    * @param sn the location of the region
    */
-  public static void splitRegion(final HConnection hConnection,
+  public static void splitRegion(final Connection connection,
                                 HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
                                  ServerName sn) throws IOException {
-    Table meta = getMetaHTable(hConnection);
+    Table meta = getMetaHTable(connection);
     try {
       HRegionInfo copyOfParent = new HRegionInfo(parent);
       copyOfParent.setOffline(true);
@@ -1260,15 +1260,15 @@ public class MetaTableAccessor {
    * Uses passed catalog tracker to get a connection to the server hosting
    * hbase:meta and makes edits to that region.
    *
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region to update location of
    * @param sn Server name
    * @throws IOException
    */
-  public static void updateRegionLocation(HConnection hConnection,
+  public static void updateRegionLocation(Connection connection,
                                          HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
     throws IOException {
-    updateLocation(hConnection, regionInfo, sn, updateSeqNum);
+    updateLocation(connection, regionInfo, sn, updateSeqNum);
   }
 
   /**
@@ -1277,62 +1277,62 @@ public class MetaTableAccessor {
    * Connects to the specified server which should be hosting the specified
    * catalog region name to perform the edit.
    *
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region to update location of
    * @param sn Server name
   * @param openSeqNum the latest sequence number obtained when the region was open
   * @throws IOException In particular could throw {@link java.net.ConnectException}
    * if the server is down on other end.
    */
-  private static void updateLocation(final HConnection hConnection,
+  private static void updateLocation(final Connection connection,
                                     HRegionInfo regionInfo, ServerName sn, long openSeqNum)
     throws IOException {
     // region replicas are kept in the primary region's row
     Put put = new Put(getMetaKeyForRegion(regionInfo));
     addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
-    putToMetaTable(hConnection, put);
+    putToMetaTable(connection, put);
     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
       " with server=" + sn);
   }
 
   /**
    * Deletes the specified region from META.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region to be deleted from META
    * @throws IOException
    */
-  public static void deleteRegion(HConnection hConnection,
+  public static void deleteRegion(Connection connection,
                                   HRegionInfo regionInfo)
     throws IOException {
     Delete delete = new Delete(regionInfo.getRegionName());
-    deleteFromMetaTable(hConnection, delete);
+    deleteFromMetaTable(connection, delete);
     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
   }
 
   /**
    * Deletes the specified regions from META.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionsInfo list of regions to be deleted from META
    * @throws IOException
    */
-  public static void deleteRegions(HConnection hConnection,
+  public static void deleteRegions(Connection connection,
                                   List<HRegionInfo> regionsInfo) throws IOException {
     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
     for (HRegionInfo hri: regionsInfo) {
       deletes.add(new Delete(hri.getRegionName()));
     }
-    deleteFromMetaTable(hConnection, deletes);
+    deleteFromMetaTable(connection, deletes);
     LOG.info("Deleted " + regionsInfo);
   }
 
   /**
    * Adds and Removes the specified regions from hbase:meta
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionsToRemove list of regions to be deleted from META
    * @param regionsToAdd list of regions to be added to META
    * @throws IOException
    */
-  public static void mutateRegions(HConnection hConnection,
+  public static void mutateRegions(Connection connection,
                                    final List<HRegionInfo> regionsToRemove,
                                    final List<HRegionInfo> regionsToAdd)
     throws IOException {
@@ -1347,7 +1347,7 @@ public class MetaTableAccessor {
         mutation.add(makePutFromRegionInfo(hri));
       }
     }
-    mutateMetaTable(hConnection, mutation);
+    mutateMetaTable(connection, mutation);
     if (regionsToRemove != null && regionsToRemove.size() > 0) {
       LOG.debug("Deleted " + regionsToRemove);
     }
@@ -1358,34 +1358,34 @@ public class MetaTableAccessor {
 
   /**
    * Overwrites the specified regions from hbase:meta
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfos list of regions to be added to META
    * @throws IOException
    */
-  public static void overwriteRegions(HConnection hConnection,
+  public static void overwriteRegions(Connection connection,
                                      List<HRegionInfo> regionInfos) throws IOException {
-    deleteRegions(hConnection, regionInfos);
+    deleteRegions(connection, regionInfos);
    // Why sleep? This is the easiest way to ensure that the previous deletes does not
    // eclipse the following puts, that might happen in the same ts from the server.
    // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
     Threads.sleep(20);
-    addRegionsToMeta(hConnection, regionInfos);
+    addRegionsToMeta(connection, regionInfos);
     LOG.info("Overwritten " + regionInfos);
   }
 
   /**
    * Deletes merge qualifiers for the specified merged region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param mergedRegion
    * @throws IOException
    */
-  public static void deleteMergeQualifiers(HConnection hConnection,
+  public static void deleteMergeQualifiers(Connection connection,
                                           final HRegionInfo mergedRegion) throws IOException {
     Delete delete = new Delete(mergedRegion.getRegionName());
     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
-    deleteFromMetaTable(hConnection, delete);
+    deleteFromMetaTable(connection, delete);
     LOG.info("Deleted references in merged region "
       + mergedRegion.getRegionNameAsString() + ", qualifier="
       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
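
The MetaTableAccessor changes above only swap the connection parameter type; callers pass a Connection where they previously passed an HConnection, and the method names stay the same. A hedged caller-side sketch against the new signatures shown above (the class name and the table name "example" are hypothetical):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MetaTableAccessorUsage {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("example");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // Same calls as before the patch; only the connection type changed.
          if (MetaTableAccessor.tableExists(connection, tableName)) {
            List<HRegionInfo> regions =
                MetaTableAccessor.getTableRegions(connection, tableName);
            System.out.println(tableName + " has " + regions.size() + " regions");
          }
        }
      }
    }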

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 1e569b8..9ffdddb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -54,7 +54,7 @@ class ConnectionAdapter implements ClusterConnection {
 
   private final ClusterConnection wrappedConnection;
 
-  public ConnectionAdapter(HConnection c) {
+  public ConnectionAdapter(Connection c) {
     wrappedConnection = (ClusterConnection)c;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 3269bd8..4c7f880 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -219,7 +219,7 @@ public class HBaseAdmin implements Admin {
    * @deprecated Do not use this internal ctor.
    */
   @Deprecated
-  public HBaseAdmin(HConnection connection)
+  public HBaseAdmin(Connection connection)
       throws MasterNotRunningException, ZooKeeperConnectionException {
     this((ClusterConnection)connection);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 8a6575e..0143b7e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -268,14 +268,14 @@ public class HTable implements HTableInterface, 
RegionLocator {
    * @deprecated Do not use, internal ctor.
    */
   @Deprecated
-  public HTable(final byte[] tableName, final HConnection connection,
+  public HTable(final byte[] tableName, final Connection connection,
       final ExecutorService pool) throws IOException {
     this(TableName.valueOf(tableName), connection, pool);
   }
 
   /** @deprecated Do not use, internal ctor. */
   @Deprecated
-  public HTable(TableName tableName, final HConnection connection,
+  public HTable(TableName tableName, final Connection connection,
       final ExecutorService pool) throws IOException {
     this(tableName, (ClusterConnection)connection, pool);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index 89c8cef..cd494a4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -30,7 +30,7 @@ interface Registry {
   /**
    * @param connection
    */
-  void init(HConnection connection);
+  void init(Connection connection);
 
   /**
    * @return Meta region location

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 4d3cc3e..7cab2cc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -41,7 +41,7 @@ class ZooKeeperRegistry implements Registry {
   ConnectionManager.HConnectionImplementation hci;
 
   @Override
-  public void init(HConnection connection) {
+  public void init(Connection connection) {
     if (!(connection instanceof ConnectionManager.HConnectionImplementation)) {
      throw new RuntimeException("This registry depends on HConnectionImplementation");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index c06dd99..6ad5ad6 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -115,7 +115,7 @@ public class TestClientNoCluster extends Configured 
implements Tool {
     final ServerName META_HOST = META_SERVERNAME;
 
     @Override
-    public void init(HConnection connection) {
+    public void init(Connection connection) {
     }
 
     @Override
@@ -694,8 +694,8 @@ public class TestClientNoCluster extends Configured 
implements Tool {
    * @param sharedConnection
    * @throws IOException
    */
-  static void cycle(int id, final Configuration c, final HConnection sharedConnection) throws IOException {
-    Table table = sharedConnection.getTable(BIG_USER_TABLE);
+  static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException {
+    Table table = sharedConnection.getTable(TableName.valueOf(BIG_USER_TABLE));
     table.setAutoFlushTo(false);
     long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
     long startTime = System.currentTimeMillis();
@@ -772,7 +772,7 @@ public class TestClientNoCluster extends Configured 
implements Tool {
    final ExecutorService pool = Executors.newCachedThreadPool(Threads.getNamedThreadFactory("p"));
      // Executors.newFixedThreadPool(servers * 10, Threads.getNamedThreadFactory("p"));
     // Share a connection so I can keep counts in the 'server' on concurrency.
-    final HConnection sharedConnection = HConnectionManager.createConnection(getConf()/*, pool*/);
+    final Connection sharedConnection = ConnectionFactory.createConnection(getConf()/*, pool*/);
     try {
       Thread [] ts = new Thread[clients];
       for (int j = 0; j < ts.length; j++) {
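
In the TestClientNoCluster changes above, Connection.getTable takes a TableName rather than a raw byte[] table name, hence the TableName.valueOf wrapper in cycle(). A small sketch of that conversion; BIG_USER_TABLE here is a hypothetical stand-in for any byte[] name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetTableByBytes {
      // Hypothetical byte[] table name, like TestClientNoCluster's BIG_USER_TABLE.
      static final byte[] BIG_USER_TABLE = Bytes.toBytes("example");

      static Table openTable(Connection sharedConnection) throws IOException {
        // Connection has no getTable(byte[]) overload; wrap the name first.
        return sharedConnection.getTable(TableName.valueOf(BIG_USER_TABLE));
      }
    }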

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index cf31fec..d6f1b67 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
@@ -55,7 +55,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
  private static final Log LOG = LogFactory.getLog(SnapshotOfRegionAssignmentFromMeta.class
       .getName());
 
-  private final HConnection hConnection;
+  private final Connection connection;
 
   /** the table name to region map */
   private final Map<TableName, List<HRegionInfo>> tableToRegionMap;
@@ -72,13 +72,13 @@ public class SnapshotOfRegionAssignmentFromMeta {
   private final Set<TableName> disabledTables;
   private final boolean excludeOfflinedSplitParents;
 
-  public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection) {
-    this(hConnection, new HashSet<TableName>(), false);
+  public SnapshotOfRegionAssignmentFromMeta(Connection connection) {
+    this(connection, new HashSet<TableName>(), false);
   }
 
-  public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection, Set<TableName> disabledTables,
+  public SnapshotOfRegionAssignmentFromMeta(Connection connection, Set<TableName> disabledTables,
       boolean excludeOfflinedSplitParents) {
-    this.hConnection = hConnection;
+    this.connection = connection;
     tableToRegionMap = new HashMap<TableName, List<HRegionInfo>>();
     regionToRegionServerMap = new HashMap<HRegionInfo, ServerName>();
     regionServerToRegionMap = new HashMap<ServerName, List<HRegionInfo>>();
@@ -141,7 +141,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
       }
     };
     // Scan hbase:meta to pick up user regions
-    MetaTableAccessor.fullScan(hConnection, v);
+    MetaTableAccessor.fullScan(connection, v);
     //regionToRegionServerMap = regions;
    LOG.info("Finished to scan the hbase:meta for the current region assignment" +
       "snapshot");

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index 83f75bd..01c1f89 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
@@ -87,12 +87,12 @@ public class FavoredNodeAssignmentHelper {
   /**
    * Update meta table with favored nodes info
    * @param regionToFavoredNodes map of HRegionInfo's to their favored nodes
-   * @param hConnection HConnection to be used
+   * @param connection connection to be used
    * @throws IOException
    */
   public static void updateMetaWithFavoredNodesInfo(
       Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
-      HConnection hConnection) throws IOException {
+      Connection connection) throws IOException {
     List<Put> puts = new ArrayList<Put>();
    for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
       Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
@@ -100,7 +100,7 @@ public class FavoredNodeAssignmentHelper {
         puts.add(put);
       }
     }
-    MetaTableAccessor.putsToMetaTable(hConnection, puts);
+    MetaTableAccessor.putsToMetaTable(connection, puts);
     LOG.info("Added " + puts.size() + " regions in META");
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index 73cda7e..ff074e8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends 
TableEventHandler implements Snapsho
   @Override
  protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
     MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
-    HConnection conn = masterServices.getShortCircuitConnection();
+    Connection conn = masterServices.getShortCircuitConnection();
     FileSystem fs = fileSystemManager.getFileSystem();
     Path rootDir = fileSystemManager.getRootDir();
     TableName tableName = hTableDescriptor.getTableName();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index cab3947..7ed7bec 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -42,9 +42,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Row;
@@ -71,7 +71,7 @@ public class ReplicationSink {
 
   private static final Log LOG = LogFactory.getLog(ReplicationSink.class);
   private final Configuration conf;
-  private final HConnection sharedHtableCon;
+  private final Connection sharedHtableCon;
   private final MetricsSink metrics;
   private final AtomicLong totalReplicatedEdits = new AtomicLong();
 
@@ -87,7 +87,7 @@ public class ReplicationSink {
     this.conf = HBaseConfiguration.create(conf);
     decorateConf();
     this.metrics = new MetricsSink();
-    this.sharedHtableCon = HConnectionManager.createConnection(this.conf);
+    this.sharedHtableCon = ConnectionFactory.createConnection(this.conf);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 2636fca..9690897 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.IOUtils;
@@ -313,13 +312,13 @@ public class RestoreSnapshotHelper {
       regionsToRestore.add(hri);
     }
 
-    public void updateMetaParentRegions(HConnection hConnection,
+    public void updateMetaParentRegions(Connection connection,
         final List<HRegionInfo> regionInfos) throws IOException {
       if (regionInfos == null || parentsMap.isEmpty()) return;
 
       // Extract region names and offlined regions
      Map<String, HRegionInfo> regionsByName = new HashMap<String, HRegionInfo>(regionInfos.size());
-      List<HRegionInfo> parentRegions = new LinkedList();
+      List<HRegionInfo> parentRegions = new LinkedList<>();
       for (HRegionInfo regionInfo: regionInfos) {
         if (regionInfo.isSplitParent()) {
           parentRegions.add(regionInfo);
@@ -344,7 +343,7 @@ public class RestoreSnapshotHelper {
         }
 
        LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters);
-        MetaTableAccessor.addRegionToMeta(hConnection, regionInfo,
+        MetaTableAccessor.addRegionToMeta(connection, regionInfo,
           regionsByName.get(daughters.getFirst()),
           regionsByName.get(daughters.getSecond()));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index 67cef70..27b0048 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Row;
@@ -74,7 +75,7 @@ public class MultiHConnection {
     if (hConnections != null) {
       synchronized (hConnections) {
         if (hConnections != null) {
-          for (HConnection conn : hConnections) {
+          for (Connection conn : hConnections) {
             if (conn != null) {
               try {
                 conn.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 0885be9..6ef5926 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -56,12 +56,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -3374,7 +3375,7 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
     }
 
     int totalNumberOfRegions = 0;
-    HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
+    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
     Admin admin = unmanagedConnection.getAdmin();
 
     try {
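
The HBaseTestingUtility hunk above obtains an Admin from an unmanaged Connection created through ConnectionFactory; a hedged sketch of that pattern (class name hypothetical), with both resources closed by the caller:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class UnmanagedConnectionAdmin {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // The caller owns both the Connection and the Admin it hands out.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("tables: " + admin.listTableNames().length);
        }
      }
    }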

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 936b181..f547765 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -48,12 +48,12 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -255,7 +255,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       ObjectMapper mapper = new ObjectMapper();
       TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
      Configuration conf = HBaseConfiguration.create(context.getConfiguration());
-      final HConnection con = HConnectionManager.createConnection(conf);
+      final Connection con = ConnectionFactory.createConnection(conf);
 
       // Evaluation task
       long elapsedTime = runOneClient(this.cmd, conf, con, opts, status);
@@ -379,7 +379,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     long[] timings = new long[opts.numClientThreads];
     ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads,
       new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
-    final HConnection con = HConnectionManager.createConnection(conf);
+    final Connection con = ConnectionFactory.createConnection(conf);
     for (int i = 0; i < threads.length; i++) {
       final int index = i;
       threads[i] = pool.submit(new Callable<Long>() {
@@ -922,7 +922,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     private final Status status;
     private final Sampler<?> traceSampler;
     private final SpanReceiverHost receiverHost;
-    protected HConnection connection;
+    protected Connection connection;
     protected Table table;
 
     private String testName;
@@ -934,7 +934,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
      * Note that all subclasses of this class must provide a public constructor
      * that has the exact same list of arguments.
      */
-    Test(final HConnection con, final TestOptions options, final Status status) {
+    Test(final Connection con, final TestOptions options, final Status status) {
       this.connection = con;
       this.conf = con ==  null? null: this.connection.getConfiguration();
      this.receiverHost = this.conf == null? null: SpanReceiverHost.getInstance(conf);
@@ -995,7 +995,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     void testSetup() throws IOException {
       if (!opts.oneCon) {
-        this.connection = HConnectionManager.createConnection(conf);
+        this.connection = ConnectionFactory.createConnection(conf);
       }
       this.table = new HTable(TableName.valueOf(opts.tableName), connection);
       this.table.setAutoFlushTo(opts.autoFlush);
@@ -1135,7 +1135,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomSeekScanTest extends Test {
-    RandomSeekScanTest(HConnection con, TestOptions options, Status status) {
+    RandomSeekScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1165,7 +1165,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static abstract class RandomScanWithRangeTest extends Test {
-    RandomScanWithRangeTest(HConnection con, TestOptions options, Status status) {
+    RandomScanWithRangeTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1209,7 +1209,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange10Test extends RandomScanWithRangeTest {
-    RandomScanWithRange10Test(HConnection con, TestOptions options, Status status) {
+    RandomScanWithRange10Test(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1220,7 +1220,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange100Test extends RandomScanWithRangeTest {
-    RandomScanWithRange100Test(HConnection con, TestOptions options, Status status) {
+    RandomScanWithRange100Test(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1231,7 +1231,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange1000Test extends RandomScanWithRangeTest {
-    RandomScanWithRange1000Test(HConnection con, TestOptions options, Status status) {
+    RandomScanWithRange1000Test(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1242,7 +1242,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange10000Test extends RandomScanWithRangeTest {
-    RandomScanWithRange10000Test(HConnection con, TestOptions options, Status status) {
+    RandomScanWithRange10000Test(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1257,7 +1257,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     private ArrayList<Get> gets;
     private Random rd = new Random();
 
-    RandomReadTest(HConnection con, TestOptions options, Status status) {
+    RandomReadTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
      consistency = options.replicas == DEFAULT_OPTS.replicas ? null : Consistency.TIMELINE;
       if (opts.multiGet > 0) {
@@ -1307,7 +1307,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomWriteTest extends Test {
-    RandomWriteTest(HConnection con, TestOptions options, Status status) {
+    RandomWriteTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1339,7 +1339,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   static class ScanTest extends Test {
     private ResultScanner testScanner;
 
-    ScanTest(HConnection con, TestOptions options, Status status) {
+    ScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1370,7 +1370,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class SequentialReadTest extends Test {
-    SequentialReadTest(HConnection con, TestOptions options, Status status) {
+    SequentialReadTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1386,7 +1386,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class SequentialWriteTest extends Test {
-    SequentialWriteTest(HConnection con, TestOptions options, Status status) {
+    SequentialWriteTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1418,7 +1418,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   static class FilteredScanTest extends Test {
    protected static final Log LOG = LogFactory.getLog(FilteredScanTest.class.getName());
 
-    FilteredScanTest(HConnection con, TestOptions options, Status status) {
+    FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1528,7 +1528,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
   }
 
-  static long runOneClient(final Class<? extends Test> cmd, Configuration conf, HConnection con,
+  static long runOneClient(final Class<? extends Test> cmd, Configuration conf, Connection con,
                            TestOptions opts, final Status status)
       throws IOException, InterruptedException {
     status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " +
@@ -1538,7 +1538,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     final Test t;
     try {
       Constructor<? extends Test> constructor =
-        cmd.getDeclaredConstructor(HConnection.class, TestOptions.class, Status.class);
+        cmd.getDeclaredConstructor(Connection.class, TestOptions.class, Status.class);
       t = constructor.newInstance(con, opts, status);
     } catch (NoSuchMethodException e) {
       throw new IllegalArgumentException("Invalid command class: " +

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 66a859e..548c072 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -31,9 +31,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -56,7 +56,7 @@ import org.junit.experimental.categories.Category;
 public class TestMetaTableAccessor {
  private static final Log LOG = LogFactory.getLog(TestMetaTableAccessor.class);
   private static final  HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static HConnection hConnection;
+  private static Connection connection;
 
   @BeforeClass public static void beforeClass() throws Exception {
     UTIL.startMiniCluster(3);
@@ -66,7 +66,7 @@ public class TestMetaTableAccessor {
     // responsive.  1 second is default as is ten retries.
     c.setLong("hbase.client.pause", 1000);
     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
-    hConnection = HConnectionManager.getConnection(c);
+    connection = HConnectionManager.getConnection(c);
   }
 
   @AfterClass public static void afterClass() throws Exception {
@@ -74,7 +74,7 @@ public class TestMetaTableAccessor {
   }
 
   /**
-   * Does {@link MetaTableAccessor#getRegion(HConnection, byte[])} and a write
+   * Does {@link MetaTableAccessor#getRegion(Connection, byte[])} and a write
   * against hbase:meta while its hosted server is restarted to prove our retrying
    * works.
    * @throws IOException
@@ -89,18 +89,18 @@ public class TestMetaTableAccessor {
     int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
     // Test it works getting a region from just made user table.
     final List<HRegionInfo> regions =
-      testGettingTableRegions(hConnection, name, regionCount);
-    MetaTask reader = new MetaTask(hConnection, "reader") {
+      testGettingTableRegions(connection, name, regionCount);
+    MetaTask reader = new MetaTask(connection, "reader") {
       @Override
       void metaTask() throws Throwable {
-        testGetRegion(hConnection, regions.get(0));
+        testGetRegion(connection, regions.get(0));
         LOG.info("Read " + regions.get(0).getEncodedName());
       }
     };
-    MetaTask writer = new MetaTask(hConnection, "writer") {
+    MetaTask writer = new MetaTask(connection, "writer") {
       @Override
       void metaTask() throws Throwable {
-        MetaTableAccessor.addRegionToMeta(hConnection, regions.get(0));
+        MetaTableAccessor.addRegionToMeta(connection, regions.get(0));
         LOG.info("Wrote " + regions.get(0).getEncodedName());
       }
     };
@@ -157,11 +157,11 @@ public class TestMetaTableAccessor {
     boolean stop = false;
     int count = 0;
     Throwable t = null;
-    final HConnection hConnection;
+    final Connection connection;
 
-    MetaTask(final HConnection hConnection, final String name) {
+    MetaTask(final Connection connection, final String name) {
       super(name);
-      this.hConnection = hConnection;
+      this.connection = connection;
     }
 
     @Override
@@ -211,14 +211,14 @@ public class TestMetaTableAccessor {
   @Test public void testTableExists() throws IOException {
     final TableName name =
         TableName.valueOf("testTableExists");
-    assertFalse(MetaTableAccessor.tableExists(hConnection, name));
+    assertFalse(MetaTableAccessor.tableExists(connection, name));
     UTIL.createTable(name, HConstants.CATALOG_FAMILY);
-    assertTrue(MetaTableAccessor.tableExists(hConnection, name));
+    assertTrue(MetaTableAccessor.tableExists(connection, name));
     Admin admin = UTIL.getHBaseAdmin();
     admin.disableTable(name);
     admin.deleteTable(name);
-    assertFalse(MetaTableAccessor.tableExists(hConnection, name));
-    assertTrue(MetaTableAccessor.tableExists(hConnection,
+    assertFalse(MetaTableAccessor.tableExists(connection, name));
+    assertTrue(MetaTableAccessor.tableExists(connection,
       TableName.META_TABLE_NAME));
   }
 
@@ -227,7 +227,7 @@ public class TestMetaTableAccessor {
     LOG.info("Started " + name);
     // Test get on non-existent region.
     Pair<HRegionInfo, ServerName> pair =
-      MetaTableAccessor.getRegion(hConnection, Bytes.toBytes("nonexistent-region"));
+      MetaTableAccessor.getRegion(connection, Bytes.toBytes("nonexistent-region"));
     assertNull(pair);
     LOG.info("Finished " + name);
   }
@@ -252,27 +252,27 @@ public class TestMetaTableAccessor {
 
     // Now make sure we only get the regions from 1 of the tables at a time
 
-    assertEquals(1, MetaTableAccessor.getTableRegions(hConnection, name).size());
-    assertEquals(1, MetaTableAccessor.getTableRegions(hConnection, greaterName).size());
+    assertEquals(1, MetaTableAccessor.getTableRegions(connection, name).size());
+    assertEquals(1, MetaTableAccessor.getTableRegions(connection, greaterName).size());
   }
 
-  private static List<HRegionInfo> testGettingTableRegions(final HConnection hConnection,
+  private static List<HRegionInfo> testGettingTableRegions(final Connection connection,
       final TableName name, final int regionCount)
   throws IOException, InterruptedException {
-    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(hConnection, name);
+    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(connection, name);
     assertEquals(regionCount, regions.size());
     Pair<HRegionInfo, ServerName> pair =
-      MetaTableAccessor.getRegion(hConnection, regions.get(0).getRegionName());
+      MetaTableAccessor.getRegion(connection, regions.get(0).getRegionName());
     assertEquals(regions.get(0).getEncodedName(),
       pair.getFirst().getEncodedName());
     return regions;
   }
 
-  private static void testGetRegion(final HConnection hConnection,
+  private static void testGetRegion(final Connection connection,
       final HRegionInfo region)
   throws IOException, InterruptedException {
     Pair<HRegionInfo, ServerName> pair =
-      MetaTableAccessor.getRegion(hConnection, region.getRegionName());
+      MetaTableAccessor.getRegion(connection, region.getRegionName());
     assertEquals(region.getEncodedName(),
       pair.getFirst().getEncodedName());
   }
@@ -333,22 +333,22 @@ public class TestMetaTableAccessor {
     long seqNum100 = random.nextLong();
 
 
-    Table meta = MetaTableAccessor.getMetaHTable(hConnection);
+    Table meta = MetaTableAccessor.getMetaHTable(connection);
     try {
-      MetaTableAccessor.updateRegionLocation(hConnection, primary, serverName0, seqNum0);
+      MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, seqNum0);
 
      // assert that the server, startcode and seqNum columns are there for the primary region
      assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
 
       // add replica = 1
-      MetaTableAccessor.updateRegionLocation(hConnection, replica1, serverName1, seqNum1);
+      MetaTableAccessor.updateRegionLocation(connection, replica1, serverName1, seqNum1);
       // check whether the primary is still there
      assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
      // now check for replica 1
      assertMetaLocation(meta, primary.getRegionName(), serverName1, seqNum1, 1, true);
 
       // add replica = 1
-      MetaTableAccessor.updateRegionLocation(hConnection, replica100, serverName100, seqNum100);
+      MetaTableAccessor.updateRegionLocation(connection, replica100, serverName100, seqNum100);
       // check whether the primary is still there
      assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
       // check whether the replica 1 is still there
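
All of the TestMetaTableAccessor edits above follow from MetaTableAccessor's entry points now accepting the Connection interface instead of HConnection. A small sketch of calling two of those methods through a freshly created connection (illustrative only; the configuration setup and table name are assumptions, not code from this patch):

  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.MetaTableAccessor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class MetaTableAccessorSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      Connection connection = ConnectionFactory.createConnection(conf);
      try {
        TableName name = TableName.valueOf("example_table");
        // Both calls take the Connection interface, as exercised in the test above.
        boolean exists = MetaTableAccessor.tableExists(connection, name);
        List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(connection, name);
        System.out.println(name + " exists=" + exists + ", regions=" + regions.size());
      } finally {
        connection.close();
      }
    }
  }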

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
index de13b84..4a0d3a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
@@ -78,7 +78,7 @@ public class TestClientTimeouts {
    */
   @Test
   public void testAdminTimeout() throws Exception {
-    HConnection lastConnection = null;
+    Connection lastConnection = null;
     boolean lastFailed = false;
     int initialInvocations = RandomTimeoutBlockingRpcChannel.invokations.get();
     RpcClient rpcClient = newRandomTimeoutRpcClient();
@@ -91,7 +91,7 @@ public class TestClientTimeouts {
         HBaseAdmin admin = null;
         try {
           admin = new HBaseAdmin(conf);
-          HConnection connection = admin.getConnection();
+          Connection connection = admin.getConnection();
           assertFalse(connection == lastConnection);
           lastConnection = connection;
           // Override the connection's rpc client for timeout testing

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index e28e944..610c815 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -4127,7 +4127,7 @@ public class TestFromClientSide {
   public void testUnmanagedHConnectionReconnect() throws Exception {
    final byte[] tableName = Bytes.toBytes("testUnmanagedHConnectionReconnect");
     HTable t = createUnmangedHConnectionHTable(tableName);
-    HConnection conn = t.getConnection();
+    Connection conn = t.getConnection();
     HBaseAdmin ha = new HBaseAdmin(conn);
     assertTrue(ha.tableExists(tableName));
     assertTrue(t.get(new Get(ROW)).isEmpty());

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index d86ca3f..ffe1e19 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -212,7 +212,7 @@ public class TestHCM {
    */
   @Test
   public void testAdminFactory() throws IOException {
-    HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
+    Connection con1 = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     Admin admin = con1.getAdmin();
     assertTrue(admin.getConnection() == con1);
     assertTrue(admin.getConfiguration() == TEST_UTIL.getConfiguration());
@@ -779,16 +779,16 @@ public class TestHCM {
   @Test
   public void testConnectionManagement() throws Exception{
     Table table0 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAM);
-    HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
-    Table table = conn.getTable(TABLE_NAME1.getName());
+    Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+    HTable table = (HTable) conn.getTable(TABLE_NAME1);
     table.close();
     assertFalse(conn.isClosed());
-    assertFalse(((HTable)table).getPool().isShutdown());
-    table = conn.getTable(TABLE_NAME1.getName());
+    assertFalse(table.getPool().isShutdown());
+    table = (HTable) conn.getTable(TABLE_NAME1);
     table.close();
-    assertFalse(((HTable)table).getPool().isShutdown());
+    assertFalse(table.getPool().isShutdown());
     conn.close();
-    assertTrue(((HTable)table).getPool().isShutdown());
+    assertTrue(table.getPool().isShutdown());
     table0.close();
   }
 
@@ -846,14 +846,14 @@ public class TestHCM {
    */
   @Test
   public void testConnectionSameness() throws Exception {
-    HConnection previousConnection = null;
+    Connection previousConnection = null;
     for (int i = 0; i < 2; i++) {
       // set random key to differentiate the connection from previous ones
       Configuration configuration = TEST_UTIL.getConfiguration();
       configuration.set("some_key", String.valueOf(_randy.nextInt()));
       LOG.info("The hash code of the current configuration is: "
           + configuration.hashCode());
-      HConnection currentConnection = HConnectionManager
+      Connection currentConnection = HConnectionManager
           .getConnection(configuration);
       if (previousConnection != null) {
         assertTrue(
@@ -884,7 +884,7 @@ public class TestHCM {
     // to set up a session and test runs for a long time.
     int maxConnections = Math.min(zkmaxconnections - 1, 20);
     List<HConnection> connections = new ArrayList<HConnection>(maxConnections);
-    HConnection previousConnection = null;
+    Connection previousConnection = null;
     try {
       for (int i = 0; i < maxConnections; i++) {
         // set random key to differentiate the connection from previous ones
@@ -913,7 +913,7 @@ public class TestHCM {
         connections.add(currentConnection);
       }
     } finally {
-      for (HConnection c: connections) {
+      for (Connection c: connections) {
         // Clean up connections made so we don't interfere w/ subsequent tests.
         HConnectionManager.deleteConnection(c.getConfiguration());
       }
@@ -927,12 +927,12 @@ public class TestHCM {
     configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID,
         String.valueOf(_randy.nextInt()));
 
-    HConnection c1 = HConnectionManager.createConnection(configuration);
+    Connection c1 = ConnectionFactory.createConnection(configuration);
     // We create two connections with the same key.
-    HConnection c2 = HConnectionManager.createConnection(configuration);
+    Connection c2 = ConnectionFactory.createConnection(configuration);
 
-    HConnection c3 = HConnectionManager.getConnection(configuration);
-    HConnection c4 = HConnectionManager.getConnection(configuration);
+    Connection c3 = HConnectionManager.getConnection(configuration);
+    Connection c4 = HConnectionManager.getConnection(configuration);
     assertTrue(c3 == c4);
 
     c1.close();
@@ -946,7 +946,7 @@ public class TestHCM {
     c3.close();
     assertTrue(c3.isClosed());
     // c3 was removed from the cache
-    HConnection c5 = HConnectionManager.getConnection(configuration);
+    Connection c5 = HConnectionManager.getConnection(configuration);
     assertTrue(c5 != c3);
 
     assertFalse(c2.isClosed());
@@ -963,13 +963,13 @@ public class TestHCM {
   @Test
   public void testCreateConnection() throws Exception {
     Configuration configuration = TEST_UTIL.getConfiguration();
-    HConnection c1 = HConnectionManager.createConnection(configuration);
-    HConnection c2 = HConnectionManager.createConnection(configuration);
+    Connection c1 = ConnectionFactory.createConnection(configuration);
+    Connection c2 = ConnectionFactory.createConnection(configuration);
     // created from the same configuration, yet they are different
     assertTrue(c1 != c2);
     assertTrue(c1.getConfiguration() == c2.getConfiguration());
     // make sure these were not cached
-    HConnection c3 = HConnectionManager.getConnection(configuration);
+    Connection c3 = HConnectionManager.getConnection(configuration);
     assertTrue(c1 != c3);
     assertTrue(c2 != c3);
   }
@@ -1230,7 +1230,7 @@ public class TestHCM {
 
     // Use connection multiple times.
     for (int i = 0; i < 30; i++) {
-      HConnection c1 = null;
+      Connection c1 = null;
       try {
         c1 = ConnectionManager.getConnectionInternal(config);
         LOG.info("HTable connection " + i + " " + c1);
@@ -1273,7 +1273,7 @@ public class TestHCM {
    TableName tableName = TableName.valueOf("testConnectionRideOverClusterRestart");
    TEST_UTIL.createTable(tableName.getName(), new byte[][] {FAM_NAM}, config).close();
 
-    HConnection connection = HConnectionManager.createConnection(config);
+    Connection connection = ConnectionFactory.createConnection(config);
     Table table = connection.getTable(tableName);
 
     // this will cache the meta location and table's region location
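
One behavioural point worth keeping in mind when reading the TestHCM hunks: ConnectionFactory.createConnection() always builds a new, caller-owned instance (the test asserts c1 != c2), while HConnectionManager.getConnection() still hands out the shared cached one (c3 == c4). A tiny sketch of the created side of that distinction (illustrative only, not code from the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CreatedConnectionsSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Two createConnection() calls on the same Configuration give two
      // distinct instances; each must be closed by whoever created it.
      Connection c1 = ConnectionFactory.createConnection(conf);
      Connection c2 = ConnectionFactory.createConnection(conf);
      System.out.println("distinct instances: " + (c1 != c2));
      c1.close();
      // Closing c1 leaves c2 usable; they do not share a lifecycle.
      System.out.println("c2 closed? " + c2.isClosed());
      c2.close();
    }
  }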

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 61967b7..8d38bdd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -43,10 +43,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
-import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
@@ -286,7 +285,7 @@ public class TestMasterOperationsForRegionReplicas {
   }
 
  private void validateNumberOfRowsInMeta(final TableName table, int numRegions,
-      HConnection hConnection) throws IOException {
+      Connection connection) throws IOException {
     assert(admin.tableExists(table));
     final AtomicInteger count = new AtomicInteger();
     Visitor visitor = new Visitor() {
@@ -296,14 +295,14 @@ public class TestMasterOperationsForRegionReplicas {
         return true;
       }
     };
-    MetaTableAccessor.fullScan(hConnection, visitor);
+    MetaTableAccessor.fullScan(connection, visitor);
     assert(count.get() == numRegions);
   }
 
  private void validateFromSnapshotFromMeta(HBaseTestingUtility util, TableName table,
-      int numRegions, int numReplica, HConnection hConnection) throws IOException {
+      int numRegions, int numReplica, Connection connection) throws IOException {
    SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
-      hConnection);
+      connection);
     snapshot.initialize();
    Map<HRegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
    assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
@@ -327,10 +326,10 @@ public class TestMasterOperationsForRegionReplicas {
     }
   }
 
-  private void validateSingleRegionServerAssignment(HConnection hConnection, int numRegions,
+  private void validateSingleRegionServerAssignment(Connection connection, int numRegions,
       int numReplica) throws IOException {
    SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
-      hConnection);
+      connection);
     snapshot.initialize();
    Map<HRegionInfo, ServerName>  regionToServerMap = snapshot.getRegionToRegionServerMap();
    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' for the namespace

http://git-wip-us.apache.org/repos/asf/hbase/blob/3beb168b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
index e032730..b54c38c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
@@ -148,7 +150,7 @@ public class TestRegionReplicaReplicationEndpoint {
     HTU.deleteTableIfAny(tableNameNoReplicas);
     HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);
 
-    HConnection connection = HConnectionManager.createConnection(HTU.getConfiguration());
+    Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
     Table table = connection.getTable(tableName);
     Table tableNoReplicas = connection.getTable(tableNameNoReplicas);
 
@@ -235,7 +237,7 @@ public class TestRegionReplicaReplicationEndpoint {
     HTU.getHBaseAdmin().createTable(htd);
 
 
-    HConnection connection = HConnectionManager.createConnection(HTU.getConfiguration());
+    Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
     Table table = connection.getTable(tableName);
 
     try {
