Repository: hbase
Updated Branches:
  refs/heads/master 3e777898a -> 9bdb81f0a


HBASE-12887 Cleanup many checkstyle errors in o.a.h.h.client


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9bdb81f0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9bdb81f0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9bdb81f0

Branch: refs/heads/master
Commit: 9bdb81f0a1db308a8a452379455b6bbfe70ea20d
Parents: 3e77789
Author: Nick Dimiduk <[email protected]>
Authored: Tue Jan 20 12:44:36 2015 -0800
Committer: Nick Dimiduk <[email protected]>
Committed: Tue Jan 20 12:44:36 2015 -0800

----------------------------------------------------------------------
 .../hbase/classification/InterfaceAudience.java |  2 +-
 ...ExcludePrivateAnnotationsStandardDoclet.java |  4 +-
 .../IncludePublicAnnotationsStandardDoclet.java |  4 +-
 .../classification/tools/RootDocProcessor.java  |  8 +-
 .../classification/tools/StabilityOptions.java  |  5 +-
 .../hadoop/hbase/client/AsyncProcess.java       | 27 ++++---
 .../hadoop/hbase/client/ClientIdGenerator.java  |  4 +-
 .../hadoop/hbase/client/ClientScanner.java      |  7 +-
 .../client/ClientSmallReversedScanner.java      |  3 +-
 .../hadoop/hbase/client/ClusterConnection.java  | 11 ++-
 .../hbase/client/ClusterStatusListener.java     |  1 -
 .../hadoop/hbase/client/ConnectionManager.java  | 64 ++++++++++++----
 .../hadoop/hbase/client/ConnectionUtils.java    |  4 +-
 .../apache/hadoop/hbase/client/FailureInfo.java |  9 +--
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 11 +--
 .../hadoop/hbase/client/HConnectable.java       |  2 +-
 .../apache/hadoop/hbase/client/HConnection.java | 77 +++++++++++++++-----
 .../hadoop/hbase/client/HConnectionManager.java | 26 ++++---
 .../org/apache/hadoop/hbase/client/HTable.java  | 25 ++++---
 .../hadoop/hbase/client/HTableMultiplexer.java  | 18 ++---
 .../apache/hadoop/hbase/client/MetaCache.java   |  6 +-
 .../apache/hadoop/hbase/client/MetaScanner.java |  5 +-
 .../apache/hadoop/hbase/client/MultiAction.java |  3 +-
 .../hbase/client/MultiServerCallable.java       |  2 +-
 .../apache/hadoop/hbase/client/Mutation.java    |  2 +-
 .../hadoop/hbase/client/RegistryFactory.java    |  7 +-
 .../org/apache/hadoop/hbase/client/Result.java  | 28 +++----
 .../RetriesExhaustedWithDetailsException.java   |  2 +-
 .../hbase/client/ReversedScannerCallable.java   |  3 +-
 .../hadoop/hbase/client/ScannerCallable.java    |  4 +-
 .../hbase/client/ServerStatisticTracker.java    |  3 +-
 .../client/UnmodifyableHTableDescriptor.java    |  8 --
 .../hadoop/hbase/client/HTableWrapper.java      | 17 ++++-
 33 files changed, 267 insertions(+), 135 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
----------------------------------------------------------------------
diff --git 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
index a76b2d9..6e67758 100644
--- 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
+++ 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
@@ -44,7 +44,7 @@ import java.lang.annotation.RetentionPolicy;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class InterfaceAudience {
+public final class InterfaceAudience {
   /**
    * Intended for use by any project or application.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
----------------------------------------------------------------------
diff --git 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
index f93e13f..221f730 100644
--- 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
+++ 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
@@ -31,7 +31,9 @@ import com.sun.tools.doclets.standard.Standard;
  * It delegates to the Standard Doclet, and takes the same options.
  */
 @InterfaceAudience.Private
-public class ExcludePrivateAnnotationsStandardDoclet {
+public final class ExcludePrivateAnnotationsStandardDoclet {
+
+  private ExcludePrivateAnnotationsStandardDoclet() {}
 
   public static LanguageVersion languageVersion() {
     return LanguageVersion.JAVA_1_5;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
----------------------------------------------------------------------
diff --git 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
index def4f1a..5f1079e 100644
--- 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
+++ 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
@@ -36,7 +36,9 @@ import com.sun.tools.doclets.standard.Standard;
  * It delegates to the Standard Doclet, and takes the same options.
  */
 @InterfaceAudience.Private
-public class IncludePublicAnnotationsStandardDoclet {
+public final class IncludePublicAnnotationsStandardDoclet {
+
+  private IncludePublicAnnotationsStandardDoclet() {}
 
   public static LanguageVersion languageVersion() {
     return LanguageVersion.JAVA_1_5;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
----------------------------------------------------------------------
diff --git 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
index 2ea1022..97d9343 100644
--- 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
+++ 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
@@ -47,11 +47,13 @@ import 
org.apache.hadoop.hbase.classification.InterfaceStability;
  * <p>
  * Based on code from 
http://www.sixlegs.com/blog/java/exclude-javadoc-tag.html.
  */
-class RootDocProcessor {
+final class RootDocProcessor {
 
   static String stability = StabilityOptions.UNSTABLE_OPTION;
   static boolean treatUnannotatedClassesAsPrivate = false;
 
+  private RootDocProcessor() {}
+
   public static RootDoc process(RootDoc root) {
     return (RootDoc) process(root, RootDoc.class);
   }
@@ -215,7 +217,9 @@ class RootDocProcessor {
     }
 
     private Object unwrap(Object proxy) {
-      if (proxy instanceof Proxy) return ((ExcludeHandler) 
Proxy.getInvocationHandler(proxy)).target;
+      if (proxy instanceof Proxy) {
+        return ((ExcludeHandler) Proxy.getInvocationHandler(proxy)).target;
+      }
       return proxy;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
----------------------------------------------------------------------
diff --git 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
index 809d96c..71af5d2 100644
--- 
a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
+++ 
b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
@@ -22,7 +22,10 @@ import com.sun.javadoc.DocErrorReporter;
 import java.util.ArrayList;
 import java.util.List;
 
-class StabilityOptions {
+final class StabilityOptions {
+
+  private StabilityOptions() {}
+
   public static final String STABLE_OPTION = "-stable";
   public static final String EVOLVING_OPTION = "-evolving";
   public static final String UNSTABLE_OPTION = "-unstable";

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 8b1db8f..826c91f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -127,7 +127,7 @@ class AsyncProcess {
 
   /** Return value from a submit that didn't contain any requests. */
   private static final AsyncRequestFuture NO_REQS_RESULT = new 
AsyncRequestFuture() {
-    public final Object[] result = new Object[0];
+    final Object[] result = new Object[0];
     @Override
     public boolean hasError() { return false; }
     @Override
@@ -243,7 +243,8 @@ class AsyncProcess {
   }
 
   public AsyncProcess(ClusterConnection hc, Configuration conf, 
ExecutorService pool,
-      RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, 
RpcControllerFactory rpcFactory) {
+      RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors,
+      RpcControllerFactory rpcFactory) {
     if (hc == null) {
       throw new IllegalArgumentException("HConnection cannot be null.");
     }
@@ -311,7 +312,7 @@ class AsyncProcess {
   }
 
   /**
-   * See {@link #submit(ExecutorService, TableName, List, boolean, 
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback, boolean)}.
+   * See {@link #submit(ExecutorService, TableName, List, boolean, 
Batch.Callback, boolean)}.
    * Uses default ExecutorService for this AP (must have been created with 
one).
    */
   public <CResult> AsyncRequestFuture submit(TableName tableName, List<? 
extends Row> rows,
@@ -514,7 +515,7 @@ class AsyncProcess {
   }
 
   /**
-   * See {@link #submitAll(ExecutorService, TableName, List, 
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback, Object[])}.
+   * See {@link #submitAll(ExecutorService, TableName, List, Batch.Callback, 
Object[])}.
    * Uses default ExecutorService for this AP (must have been created with 
one).
    */
   public <CResult> AsyncRequestFuture submitAll(TableName tableName,
@@ -1345,11 +1346,11 @@ class AsyncProcess {
       if (results == null) {
          decActionCounter(index);
          return; // Simple case, no replica requests.
-      } else if ((state = trySetResultSimple(
-          index, action.getAction(), false, result, null, isStale)) == null) {
+      }
+      state = trySetResultSimple(index, action.getAction(), false, result, 
null, isStale);
+      if (state == null) {
         return; // Simple case, no replica requests.
       }
-      assert state != null;
       // At this point we know that state is set to replica tracking class.
       // It could be that someone else is also looking at it; however, we know 
there can
       // only be one state object, and only one thread can set callCount to 0. 
Other threads
@@ -1385,11 +1386,11 @@ class AsyncProcess {
         errors.add(throwable, row, server);
         decActionCounter(index);
         return; // Simple case, no replica requests.
-      } else if ((state = trySetResultSimple(
-          index, row, true, throwable, server, false)) == null) {
+      }
+      state = trySetResultSimple(index, row, true, throwable, server, false);
+      if (state == null) {
         return; // Simple case, no replica requests.
       }
-      assert state != null;
       BatchErrors target = null; // Error will be added to final errors, or 
temp replica errors.
       boolean isActionDone = false;
       synchronized (state) {
@@ -1455,7 +1456,8 @@ class AsyncProcess {
         results[index] = result;
       } else {
         synchronized (replicaResultLock) {
-          if ((resObj = results[index]) == null) {
+          resObj = results[index];
+          if (resObj == null) {
             if (isFromReplica) {
               throw new AssertionError("Unexpected stale result for " + row);
             }
@@ -1720,7 +1722,8 @@ class AsyncProcess {
   }
 
   /**
-   * For manageError. Only used to make logging more clear, we don't actually 
care why we don't retry.
+   * For {@link AsyncRequestFutureImpl#manageError(int, Row, Retry, Throwable, 
ServerName)}. Only
+   * used to make logging more clear, we don't actually care why we don't 
retry.
    */
   private enum Retry {
     YES,

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
index ac6c82e..7c859a1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
@@ -32,9 +32,11 @@ import org.apache.hadoop.hbase.util.Bytes;
  * such as an IP address, PID, and composite deterministic ID.
  */
 @InterfaceAudience.Private
-class ClientIdGenerator {
+final class ClientIdGenerator {
   static final Log LOG = LogFactory.getLog(ClientIdGenerator.class);
 
+  private ClientIdGenerator() {}
+
   /**
    * @return a unique ID incorporating IP address, PID, TID and timer. Might 
be an overkill...
    * Note though that new UUID in java by default is just a random number.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index afc9bc4..d31642a 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -465,9 +465,12 @@ public class ClientScanner extends AbstractClientScanner {
            // We used to catch this error, interpret, and rethrow. However, we
            // have since decided that it's not nice for a scanner's close to
            // throw exceptions. Chances are it was just due to lease time out.
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("scanner failed to close", e);
+          }
         } catch (IOException e) {
-           /* An exception other than UnknownScanner is unexpected. */
-           LOG.warn("scanner failed to close. Exception follows: " + e);
+          /* An exception other than UnknownScanner is unexpected. */
+          LOG.warn("scanner failed to close.", e);
         }
         callable = null;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
index 86ff424..35b3d88 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
@@ -62,7 +62,8 @@ public class ClientSmallReversedScanner extends 
ReversedClientScanner {
       final TableName tableName, ClusterConnection connection,
       RpcRetryingCallerFactory rpcFactory, RpcControllerFactory 
controllerFactory,
       ExecutorService pool, int primaryOperationTimeout) throws IOException {
-    super(conf, scan, tableName, connection, rpcFactory, controllerFactory, 
pool, primaryOperationTimeout);
+    super(conf, scan, tableName, connection, rpcFactory, controllerFactory, 
pool,
+        primaryOperationTimeout);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 45b99eb..f0398f9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -40,8 +40,10 @@ import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 // classes and unit tests only.
 public interface ClusterConnection extends HConnection {
 
-  /** @return - true if the master server is running
-   * @deprecated this has been deprecated without a replacement */
+  /**
+   * @return - true if the master server is running
+   * @deprecated this has been deprecated without a replacement
+   */
   @Override
   @Deprecated
   boolean isMasterRunning()
@@ -194,8 +196,8 @@ public interface ClusterConnection extends HConnection {
   * @return region locations for this row.
   * @throws IOException
   */
- RegionLocations locateRegion(TableName tableName,
-                              byte[] row, boolean useCache, boolean retry, int 
replicaId) throws IOException;
+ RegionLocations locateRegion(TableName tableName, byte[] row, boolean 
useCache, boolean retry,
+     int replicaId) throws IOException;
 
   /**
    * Returns a {@link MasterKeepAliveConnection} to the active master
@@ -250,6 +252,7 @@ public interface ClusterConnection extends HConnection {
    * connection.
    * @return The shared instance. Never returns null.
    * @throws MasterNotRunningException
+   * @deprecated Since 0.96.0
    */
   @Override
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
index 2e2ea65..5756232 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
@@ -37,7 +37,6 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
-import java.net.Inet6Address;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 5db92eb..166bcdd 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -189,7 +189,7 @@ import com.google.protobuf.ServiceException;
 @SuppressWarnings("serial")
 @InterfaceAudience.Private
 // NOTE: DO NOT make this class public. It was made package-private on purpose.
-class ConnectionManager {
+final class ConnectionManager {
   static final Log LOG = LogFactory.getLog(ConnectionManager.class);
 
   public static final String RETRIES_BY_SERVER_KEY = 
"hbase.client.retries.by.server";
@@ -269,6 +269,7 @@ class ConnectionManager {
    * @param conf configuration
    * @return HConnection object for <code>conf</code>
    * @throws ZooKeeperConnectionException
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static HConnection getConnection(final Configuration conf) throws 
IOException {
@@ -400,6 +401,9 @@ class ConnectionManager {
     return createConnection(conf, false, pool, user);
   }
 
+  /**
+   * @deprecated instead use one of the {@link 
ConnectionFactory#createConnection()} methods.
+   */
   @Deprecated
   static HConnection createConnection(final Configuration conf, final boolean 
managed)
       throws IOException {
@@ -407,6 +411,9 @@ class ConnectionManager {
     return createConnection(conf, managed, null, provider.getCurrent());
   }
 
+  /**
+   * @deprecated instead use one of the {@link 
ConnectionFactory#createConnection()} methods.
+   */
   @Deprecated
   static ClusterConnection createConnection(final Configuration conf, final 
boolean managed,
       final ExecutorService pool, final User user)
@@ -420,7 +427,7 @@ class ConnectionManager {
    * then close connection to the zookeeper ensemble and let go of all 
associated resources.
    *
    * @param conf configuration whose identity is used to find {@link 
HConnection} instance.
-   * @deprecated
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static void deleteConnection(Configuration conf) {
@@ -432,7 +439,7 @@ class ConnectionManager {
    * This will then close connection to the zookeeper ensemble and let go of 
all resources.
    *
    * @param connection
-   * @deprecated
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static void deleteStaleConnection(HConnection connection) {
@@ -443,7 +450,7 @@ class ConnectionManager {
    * Delete information for all connections. Close or not the connection, 
depending on the
    *  staleConnection boolean and the ref count. By default, you should use it 
with
    *  staleConnection to true.
-   * @deprecated
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static void deleteAllConnections(boolean staleConnection) {
@@ -466,7 +473,9 @@ class ConnectionManager {
     deleteAllConnections(false);
   }
 
-
+  /**
+   * @deprecated connection caching is going away.
+   */
   @Deprecated
   private static void deleteConnection(HConnection connection, boolean 
staleConnection) {
     synchronized (CONNECTION_INSTANCES) {
@@ -479,6 +488,9 @@ class ConnectionManager {
     }
   }
 
+  /**
+   * @deprecated connection caching is going away.
+   */
   @Deprecated
   private static void deleteConnection(HConnectionKey connectionKey, boolean 
staleConnection) {
     synchronized (CONNECTION_INSTANCES) {
@@ -843,6 +855,7 @@ class ConnectionManager {
      * @return true if the master is running, throws an exception otherwise
      * @throws MasterNotRunningException - if the master is not running
      * @throws ZooKeeperConnectionException
+     * @deprecated this has been deprecated without a replacement
      */
     @Deprecated
     @Override
@@ -989,7 +1002,7 @@ class ConnectionManager {
     @Override
     public List<HRegionLocation> locateRegions(final TableName tableName)
     throws IOException {
-      return locateRegions (tableName, false, true);
+      return locateRegions(tableName, false, true);
     }
 
     @Override
@@ -1345,7 +1358,7 @@ class ConnectionManager {
       int userCount;
       long keepAliveUntil = Long.MAX_VALUE;
 
-      MasterServiceState (final HConnection connection) {
+      MasterServiceState(final HConnection connection) {
         super();
         this.connection = connection;
       }
@@ -1584,7 +1597,7 @@ class ConnectionManager {
       if (zkw == null){
         return;
       }
-      if (keepAliveZookeeperUserCount.addAndGet(-1) <= 0 ){
+      if (keepAliveZookeeperUserCount.addAndGet(-1) <= 0) {
         keepZooKeeperWatcherAliveUntil = System.currentTimeMillis() + 
keepAlive;
       }
     }
@@ -1596,7 +1609,7 @@ class ConnectionManager {
      *  {@link #keepZooKeeperWatcherAliveUntil}). Keep alive time is
      *  managed by the release functions and the variable {@link #keepAlive}
      */
-    private static class DelayedClosing extends Chore implements Stoppable {
+    private static final class DelayedClosing extends Chore implements 
Stoppable {
       private HConnectionImplementation hci;
       Stoppable stoppable;
 
@@ -2067,7 +2080,7 @@ class ConnectionManager {
         final Object exception, final HRegionLocation source) {
       assert source != null;
       updateCachedLocations(tableName, source.getRegionInfo().getRegionName()
-        , rowkey, exception, source.getServerName());
+          , rowkey, exception, source.getServerName());
     }
 
     /**
@@ -2144,6 +2157,9 @@ class ConnectionManager {
       updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, 
source);
     }
 
+    /**
+     * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead
+     */
     @Override
     @Deprecated
     public void processBatch(List<? extends Row> list,
@@ -2160,6 +2176,9 @@ class ConnectionManager {
       processBatchCallback(list, tableName, pool, results, null);
     }
 
+    /**
+     * @deprecated Unsupported API
+     */
     @Override
     @Deprecated
     public void processBatch(List<? extends Row> list,
@@ -2194,6 +2213,9 @@ class ConnectionManager {
       }
     }
 
+    /**
+     * @deprecated Unsupported API
+     */
     @Override
     @Deprecated
     public <R> void processBatchCallback(
@@ -2238,23 +2260,35 @@ class ConnectionManager {
       return metaCache.getNumberOfCachedRegionLocations(tableName);
     }
 
+    /**
+     * @deprecated does nothing since 0.99
+     */
     @Override
     @Deprecated
     public void setRegionCachePrefetch(final TableName tableName, final 
boolean enable) {
     }
 
+    /**
+     * @deprecated does nothing since 0.99
+     */
     @Override
     @Deprecated
     public void setRegionCachePrefetch(final byte[] tableName,
         final boolean enable) {
     }
 
+    /**
+     * @deprecated always return false since 0.99
+     */
     @Override
     @Deprecated
     public boolean getRegionCachePrefetch(TableName tableName) {
       return false;
     }
 
+    /**
+     * @deprecated always return false since 0.99
+     */
     @Override
     @Deprecated
     public boolean getRegionCachePrefetch(byte[] tableName) {
@@ -2400,7 +2434,7 @@ class ConnectionManager {
     @Override
     public String[] getTableNames() throws IOException {
       TableName[] tableNames = listTableNames();
-      String result[] = new String[tableNames.length];
+      String[] result = new String[tableNames.length];
       for (int i = 0; i < tableNames.length; i++) {
         result[i] = tableNames[i].getNameAsString();
       }
@@ -2567,7 +2601,7 @@ class ConnectionManager {
       long result;
       ServerErrors errorStats = errorsByServer.get(server);
       if (errorStats != null) {
-        result = ConnectionUtils.getPauseTime(basePause, 
errorStats.retries.get());
+        result = ConnectionUtils.getPauseTime(basePause, 
errorStats.getCount());
       } else {
         result = 0; // yes, if the server is not in our list we don't wait 
before retrying.
       }
@@ -2599,7 +2633,11 @@ class ConnectionManager {
      * The record of errors for a server.
      */
     private static class ServerErrors {
-      public final AtomicInteger retries = new AtomicInteger(0);
+      private final AtomicInteger retries = new AtomicInteger(0);
+
+      public int getCount() {
+        return retries.get();
+      }
 
       public void addError() {
         retries.incrementAndGet();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 4d6a36c..dae2499 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -37,7 +37,9 @@ import com.google.common.annotations.VisibleForTesting;
  * Utility used by client connections.
  */
 @InterfaceAudience.Private
-public class ConnectionUtils {
+public final class ConnectionUtils {
+
+  private ConnectionUtils() {}
 
   private static final Random RANDOM = new Random();
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
index 16707cb..b243684 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
@@ -32,18 +32,17 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 @InterfaceAudience.Private
 class FailureInfo {
   // The number of consecutive failures.
-  public final AtomicLong numConsecutiveFailures = new AtomicLong();
+  final AtomicLong numConsecutiveFailures = new AtomicLong();
   // The time when the server started to become unresponsive
   // Once set, this would never be updated.
-  public final long timeOfFirstFailureMilliSec;
+  final long timeOfFirstFailureMilliSec;
   // The time when the client last tried to contact the server.
   // This is only updated by one client at a time
-  public volatile long timeOfLatestAttemptMilliSec;
+  volatile long timeOfLatestAttemptMilliSec;
   // Used to keep track of concurrent attempts to contact the server.
   // In Fast fail mode, we want just one client thread to try to connect
   // the rest of the client threads will fail fast.
-  public final AtomicBoolean exclusivelyRetringInspiteOfFastFail = new 
AtomicBoolean(
-      false);
+  final AtomicBoolean exclusivelyRetringInspiteOfFastFail = new 
AtomicBoolean(false);
 
   @Override
   public String toString() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 5a9ca74..5ca6915 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -339,7 +339,7 @@ public class HBaseAdmin implements Admin {
   @Deprecated
   public String[] getTableNames() throws IOException {
     TableName[] tableNames = listTableNames();
-    String result[] = new String[tableNames.length];
+    String[] result = new String[tableNames.length];
     for (int i = 0; i < tableNames.length; i++) {
       result[i] = tableNames[i].getNameAsString();
     }
@@ -356,7 +356,7 @@ public class HBaseAdmin implements Admin {
   @Deprecated
   public String[] getTableNames(Pattern pattern) throws IOException {
     TableName[] tableNames = listTableNames(pattern);
-    String result[] = new String[tableNames.length];
+    String[] result = new String[tableNames.length];
     for (int i = 0; i < tableNames.length; i++) {
       result[i] = tableNames[i].getNameAsString();
     }
@@ -2902,8 +2902,8 @@ public class HBaseAdmin implements Admin {
                         final byte[] tableName) throws IOException,
     * <p>
     * Snapshots are considered unique based on <b>the name of the 
snapshot</b>. Attempts to take a
-    * snapshot with the same name (even a different type or with different 
parameters) will fail with
-    * a {@link SnapshotCreationException} indicating the duplicate naming.
+    * snapshot with the same name (even a different type or with different 
parameters) will fail
+    * with a {@link SnapshotCreationException} indicating the duplicate naming.
     * <p>
     * Snapshot names follow the same naming constraints as tables in HBase.
     * @param snapshotName name of the snapshot to be created
@@ -3473,7 +3473,8 @@ public class HBaseAdmin implements Admin {
         // sleep a backoff <= pauseTime amount
         long sleep = getPauseTime(tries++);
         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
-        LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for 
snapshot restore to complete.");
+        LOG.debug(tries + ") Sleeping: " + sleep
+            + " ms while we wait for snapshot restore to complete.");
         Thread.sleep(sleep);
       } catch (InterruptedException e) {
         throw (InterruptedIOException)new 
InterruptedIOException("Interrupted").initCause(e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java
index c4f7b10..f5f841d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java
@@ -40,7 +40,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public abstract class HConnectable<T> {
-  public Configuration conf;
+  protected Configuration conf;
 
   protected HConnectable(Configuration conf) {
     this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index 9a4ef69..f185cb2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -198,6 +198,9 @@ public interface HConnection extends Connection {
    */
   boolean isTableEnabled(TableName tableName) throws IOException;
 
+  /**
+   * @deprecated instead use {@link #isTableEnabled(TableName)}
+   */
   @Deprecated
   boolean isTableEnabled(byte[] tableName) throws IOException;
 
@@ -208,6 +211,9 @@ public interface HConnection extends Connection {
    */
   boolean isTableDisabled(TableName tableName) throws IOException;
 
+  /**
+   * @deprecated instead use {@link #isTableDisabled(TableName)}
+   */
   @Deprecated
   boolean isTableDisabled(byte[] tableName) throws IOException;
 
@@ -225,6 +231,9 @@ public interface HConnection extends Connection {
    */
   boolean isTableAvailable(TableName tableName) throws IOException;
 
+  /**
+   * @deprecated instead use {@link #isTableAvailable(TableName)}
+   */
   @Deprecated
   boolean isTableAvailable(byte[] tableName) throws IOException;
 
@@ -233,20 +242,18 @@ public interface HConnection extends Connection {
    * splitkeys which was used while creating the given table.
    * Note : If this api is used after a table's region gets splitted, the api 
may return
    * false.
-   * @param tableName
-   *          tableName
-   * @param splitKeys
-   *          splitKeys used while creating table
-   * @throws IOException
-   *           if a remote or network exception occurs
-   * @deprecated internal method, do not use thru HConnection */
+   * @param tableName tableName
+   * @param splitKeys splitKeys used while creating table
+   * @throws IOException if a remote or network exception occurs
+   * @deprecated internal method, do not use through HConnection */
   @Deprecated
-  boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws
-      IOException;
+  boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws 
IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
-  boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws
-      IOException;
+  boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws 
IOException;
 
   /**
    * List all the userspace tables.  In other words, scan the hbase:meta table.
@@ -278,11 +285,15 @@ public interface HConnection extends Connection {
    * @param tableName table name
    * @return table metadata
    * @throws IOException if a remote or network exception occurs
+   * @deprecated internal method, do not use through HConnection
    */
   @Deprecated
   HTableDescriptor getHTableDescriptor(TableName tableName)
   throws IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   HTableDescriptor getHTableDescriptor(byte[] tableName)
   throws IOException;
@@ -295,19 +306,22 @@ public interface HConnection extends Connection {
    * @return HRegionLocation that describes where to find the region in
    * question
    * @throws IOException if a remote or network exception occurs
-   * @deprecated internal method, do not use thru HConnection
+   * @deprecated internal method, do not use through HConnection
    */
   @Deprecated
   public HRegionLocation locateRegion(final TableName tableName,
       final byte [] row) throws IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   public HRegionLocation locateRegion(final byte[] tableName,
       final byte [] row) throws IOException;
 
   /**
    * Allows flushing the region cache.
-   * @deprecated internal method, do not use thru HConnection */
+   * @deprecated internal method, do not use through HConnection */
   @Deprecated
   void clearRegionCache();
 
@@ -316,10 +330,13 @@ public interface HConnection extends Connection {
    * <code>tableName</code>
    * @param tableName Name of the table whose regions we are to remove from
    * cache.
-   * @deprecated internal method, do not use thru HConnection */
+   * @deprecated internal method, do not use through HConnection */
   @Deprecated
   void clearRegionCache(final TableName tableName);
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   void clearRegionCache(final byte[] tableName);
 
@@ -338,15 +355,21 @@ public interface HConnection extends Connection {
    * @return HRegionLocation that describes where to find the region in
    * question
    * @throws IOException if a remote or network exception occurs
-   * @deprecated internal method, do not use thru HConnection */
+   * @deprecated internal method, do not use through HConnection */
   @Deprecated
   HRegionLocation relocateRegion(final TableName tableName,
       final byte [] row) throws IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   HRegionLocation relocateRegion(final byte[] tableName,
       final byte [] row) throws IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   void updateCachedLocations(TableName tableName, byte[] rowkey,
                                     Object exception, HRegionLocation source);
@@ -359,12 +382,14 @@ public interface HConnection extends Connection {
    * @param rowkey the row
    * @param exception the exception if any. Can be null.
    * @param source the previous location
-   * @deprecated internal method, do not use thru HConnection
+   * @deprecated internal method, do not use through HConnection
    */
   @Deprecated
   void updateCachedLocations(TableName tableName, byte[] regionName, byte[] 
rowkey,
                                     Object exception, ServerName source);
-
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   void updateCachedLocations(byte[] tableName, byte[] rowkey,
                                     Object exception, HRegionLocation source);
@@ -389,6 +414,9 @@ public interface HConnection extends Connection {
   @Deprecated
   List<HRegionLocation> locateRegions(final TableName tableName) throws 
IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   List<HRegionLocation> locateRegions(final byte[] tableName) throws 
IOException;
 
@@ -407,6 +435,9 @@ public interface HConnection extends Connection {
       final boolean useCache,
       final boolean offlined) throws IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   public List<HRegionLocation> locateRegions(final byte[] tableName,
       final boolean useCache,
@@ -464,6 +495,9 @@ public interface HConnection extends Connection {
     boolean reload)
   throws IOException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   HRegionLocation getRegionLocation(byte[] tableName, byte [] row,
     boolean reload)
@@ -488,6 +522,9 @@ public interface HConnection extends Connection {
   void processBatch(List<? extends Row> actions, final TableName tableName,
       ExecutorService pool, Object[] results) throws IOException, 
InterruptedException;
 
+  /**
+   * @deprecated internal method, do not use through HConnection
+   */
   @Deprecated
   void processBatch(List<? extends Row> actions, final byte[] tableName,
       ExecutorService pool, Object[] results) throws IOException, 
InterruptedException;
@@ -504,6 +541,9 @@ public interface HConnection extends Connection {
       Object[] results,
       Batch.Callback<R> callback) throws IOException, InterruptedException;
 
+  /**
+   * @deprecated Unsupported API
+   */
   @Deprecated
   public <R> void processBatchCallback(List<? extends Row> list,
       final byte[] tableName,
@@ -554,6 +594,9 @@ public interface HConnection extends Connection {
   @Deprecated
   HTableDescriptor[] getHTableDescriptorsByTableName(List<TableName> 
tableNames) throws IOException;
 
+  /**
+   * @deprecated since 0.96.0
+   */
   @Deprecated
   HTableDescriptor[] getHTableDescriptors(List<String> tableNames) throws
       IOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 4678092..edd071b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -86,12 +86,14 @@ import org.apache.hadoop.hbase.security.User;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 @Deprecated
-public class HConnectionManager extends ConnectionFactory {
+public final class HConnectionManager extends ConnectionFactory {
 
+  /** @deprecated connection caching is going away */
   @Deprecated
   public static final String RETRIES_BY_SERVER_KEY =
       ConnectionManager.RETRIES_BY_SERVER_KEY;
 
+  /** @deprecated connection caching is going away */
   @Deprecated
   public static final int MAX_CACHED_CONNECTION_INSTANCES =
       ConnectionManager.MAX_CACHED_CONNECTION_INSTANCES;
@@ -110,7 +112,7 @@ public class HConnectionManager extends ConnectionFactory {
    * {@link HConnectionKey}.
    * @param conf configuration
    * @return HConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
+   * @deprecated connection caching is going away
    */
   @Deprecated
   public static HConnection getConnection(final Configuration conf) throws 
IOException {
@@ -138,7 +140,7 @@ public class HConnectionManager extends ConnectionFactory {
    *
    * @param conf configuration
    * @return HConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
+   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
    */
   @Deprecated
   public static HConnection createConnection(Configuration conf) throws 
IOException {
@@ -164,7 +166,7 @@ public class HConnectionManager extends ConnectionFactory {
    * @param conf configuration
    * @param pool the thread pool to use for batch operation in HTables used 
via this HConnection
    * @return HConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
+   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
    */
   @Deprecated
   public static HConnection createConnection(Configuration conf, 
ExecutorService pool)
@@ -190,7 +192,7 @@ public class HConnectionManager extends ConnectionFactory {
    * @param conf configuration
    * @param user the user the connection is for
    * @return HConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
+   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
    */
   @Deprecated
   public static HConnection createConnection(Configuration conf, User user)
@@ -217,7 +219,7 @@ public class HConnectionManager extends ConnectionFactory {
    * @param pool the thread pool to use for batch operation in HTables used 
via this HConnection
    * @param user the user the connection is for
    * @return HConnection object for <code>conf</code>
-   * @throws ZooKeeperConnectionException
+   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
    */
   @Deprecated
   public static HConnection createConnection(Configuration conf, 
ExecutorService pool, User user)
@@ -225,12 +227,18 @@ public class HConnectionManager extends ConnectionFactory 
{
     return ConnectionManager.createConnection(conf, pool, user);
   }
 
+  /**
+   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
+   */
   @Deprecated
   static HConnection createConnection(final Configuration conf, final boolean 
managed)
       throws IOException {
     return ConnectionManager.createConnection(conf, managed);
   }
 
+  /**
+   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
+   */
   @Deprecated
   static ClusterConnection createConnection(final Configuration conf, final 
boolean managed,
       final ExecutorService pool, final User user) throws IOException {
@@ -243,7 +251,7 @@ public class HConnectionManager extends ConnectionFactory {
    * then close connection to the zookeeper ensemble and let go of all 
associated resources.
    *
    * @param conf configuration whose identity is used to find {@link 
HConnection} instance.
-   * @deprecated
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static void deleteConnection(Configuration conf) {
@@ -255,7 +263,7 @@ public class HConnectionManager extends ConnectionFactory {
    * This will then close connection to the zookeeper ensemble and let go of 
all resources.
    *
    * @param connection
-   * @deprecated
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static void deleteStaleConnection(HConnection connection) {
@@ -266,7 +274,7 @@ public class HConnectionManager extends ConnectionFactory {
    * Delete information for all connections. Close or not the connection, 
depending on the
    *  staleConnection boolean and the ref count. By default, you should use it 
with
    *  staleConnection to true.
-   * @deprecated
+   * @deprecated connection caching is going away.
    */
   @Deprecated
   public static void deleteAllConnections(boolean staleConnection) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 68d3f9f..2c405d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -172,7 +172,8 @@ public class HTable implements HTableInterface {
   public HTable(Configuration conf, final TableName tableName)
   throws IOException {
     this.tableName = tableName;
-    this.cleanupPoolOnClose = this.cleanupConnectionOnClose = true;
+    this.cleanupPoolOnClose = true;
+    this.cleanupConnectionOnClose = true;
     if (conf == null) {
       this.connection = null;
       return;
@@ -361,7 +362,8 @@ public class HTable implements HTableInterface {
     }
 
     // puts need to track errors globally due to how the APIs currently work.
-    ap = new AsyncProcess(connection, configuration, pool, rpcCallerFactory, 
true, rpcControllerFactory);
+    ap = new AsyncProcess(connection, configuration, pool, rpcCallerFactory, 
true,
+        rpcControllerFactory);
     multiAp = this.connection.getAsyncProcess();
     this.locator = new HRegionLocator(getName(), connection);
   }
@@ -630,7 +632,8 @@ public class HTable implements HTableInterface {
    */
   @Deprecated
   public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws 
IOException {
-    // TODO: Odd that this returns a Map of HRI to SN whereas 
getRegionLocator, singular, returns an HRegionLocation.
+    // TODO: Odd that this returns a Map of HRI to SN whereas 
getRegionLocator, singular,
+    // returns an HRegionLocation.
     return MetaScanner.allTableRegions(this.connection, getName());
   }
 
@@ -931,7 +934,7 @@ public class HTable implements HTableInterface {
    * {@inheritDoc}
    * @deprecated If any exception is thrown by one of the actions, there is no 
way to
    * retrieve the partially executed results. Use
-   * {@link #batchCallback(List, Object[], 
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
+   * {@link #batchCallback(List, Object[], Batch.Callback)}
    * instead.
    */
   @Deprecated
@@ -984,8 +987,8 @@ public class HTable implements HTableInterface {
       throw (InterruptedIOException)new InterruptedIOException().initCause(e);
     } finally {
       // mutate list so that it is empty for complete success, or contains 
only failed records
-      // results are returned in the same order as the requests in list
-      // walk the list backwards, so we can remove from list without impacting 
the indexes of earlier members
+      // results are returned in the same order as the requests in list walk 
the list backwards,
+      // so we can remove from list without impacting the indexes of earlier 
members
       for (int i = results.length - 1; i>=0; i--) {
         // if result is not null, it succeeded
         if (results[i] instanceof Result) {
@@ -1434,6 +1437,7 @@ public class HTable implements HTableInterface {
 
   /**
    * {@inheritDoc}
+   * @deprecated Use {@link #existsAll(java.util.List)}  instead.
    */
   @Override
   @Deprecated
@@ -1541,6 +1545,10 @@ public class HTable implements HTableInterface {
 
   /**
    * {@inheritDoc}
+   * @deprecated in 0.96. When called with setAutoFlush(false), this function 
also
+   *  set clearBufferOnFail to true, which is unexpected but kept for 
historical reasons.
+   *  Replace it with setAutoFlush(false, false) if this is exactly what you 
want, or by
+   *  {@link #setAutoFlushTo(boolean)} for all other cases.
    */
   @Deprecated
   @Override
@@ -1770,9 +1778,8 @@ public class HTable implements HTableInterface {
             + Bytes.toStringBinary(e.getKey()), ee);
         throw ee.getCause();
       } catch (InterruptedException ie) {
-        throw new InterruptedIOException("Interrupted calling coprocessor 
service " + service.getName()
-            + " for row " + Bytes.toStringBinary(e.getKey()))
-            .initCause(ie);
+        throw new InterruptedIOException("Interrupted calling coprocessor 
service "
+            + service.getName() + " for row " + 
Bytes.toStringBinary(e.getKey())).initCause(ie);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 7d61a0b..10308da 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -157,7 +157,7 @@ public class HTableMultiplexer {
   }
 
   /**
-   * Deprecated. Use {@link #put(TableName, List) } instead.
+   * @deprecated Use {@link #put(TableName, List) } instead.
    */
   @Deprecated
   public List<Put> put(byte[] tableName, final List<Put> puts) {
@@ -195,7 +195,7 @@ public class HTableMultiplexer {
   }
 
   /**
-   * Deprecated. Use {@link #put(TableName, Put) } instead.
+   * @deprecated Use {@link #put(TableName, Put) } instead.
    */
   @Deprecated
   public boolean put(final byte[] tableName, final Put put, int retry) {
@@ -203,7 +203,7 @@ public class HTableMultiplexer {
   }
 
   /**
-   * Deprecated. Use {@link #put(TableName, Put)} instead.
+   * @deprecated Use {@link #put(TableName, Put)} instead.
    */
   @Deprecated
   public boolean put(final byte[] tableName, Put put) {
@@ -224,8 +224,8 @@ public class HTableMultiplexer {
         worker = serverToFlushWorkerMap.get(addr);
         if (worker == null) {
           // Create the flush worker
-          worker = new FlushWorker(workerConf, this.conn, addr, this, 
perRegionServerBufferQueueSize,
-                  pool, executor);
+          worker = new FlushWorker(workerConf, this.conn, addr, this,
+              perRegionServerBufferQueueSize, pool, executor);
           this.serverToFlushWorkerMap.put(addr, worker);
           executor.scheduleAtFixedRate(worker, flushPeriod, flushPeriod, 
TimeUnit.MILLISECONDS);
         }
@@ -343,9 +343,9 @@ public class HTableMultiplexer {
   }
 
   private static class PutStatus {
-    public final HRegionInfo regionInfo;
-    public final Put put;
-    public final int retryCount;
+    private final HRegionInfo regionInfo;
+    private final Put put;
+    private final int retryCount;
 
     public PutStatus(HRegionInfo regionInfo, Put put, int retryCount) {
       this.regionInfo = regionInfo;
@@ -384,7 +384,7 @@ public class HTableMultiplexer {
     }
 
     public synchronized void reset() {
-      this.sum = 0l;
+      this.sum = 0L;
       this.count = 0;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
index a49f95c..b1c7ac0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
@@ -256,7 +256,8 @@ public class MetaCache {
               if (updatedLocations.isEmpty()) {
                 deletedSomething |= tableLocations.remove(e.getKey(), 
regionLocations);
               } else {
-                deletedSomething |= tableLocations.replace(e.getKey(), 
regionLocations, updatedLocations);
+                deletedSomething |= tableLocations.replace(e.getKey(), 
regionLocations,
+                    updatedLocations);
               }
             }
           }
@@ -389,7 +390,8 @@ public class MetaCache {
         if (updatedLocations.isEmpty()) {
           removed = 
tableLocations.remove(location.getRegionInfo().getStartKey(), regionLocations);
         } else {
-          removed = 
tableLocations.replace(location.getRegionInfo().getStartKey(), regionLocations, 
updatedLocations);
+          removed = 
tableLocations.replace(location.getRegionInfo().getStartKey(), regionLocations,
+              updatedLocations);
         }
         if (removed && LOG.isTraceEnabled()) {
           LOG.trace("Removed " + location + " from cache");

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 3bc4000..f281563 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -56,8 +56,11 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 //TODO: merge this to MetaTableAccessor, get rid of it.
-public class MetaScanner {
+public final class MetaScanner {
   private static final Log LOG = LogFactory.getLog(MetaScanner.class);
+
+  private MetaScanner() {}
+
   /**
    * Scans the meta table and calls a visitor on each RowResult and uses a 
empty
    * start row value as table name.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
index 16ab852..b44803b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
@@ -38,8 +38,7 @@ public final class MultiAction<R> {
   // TODO: This class should not be visible outside of the client package.
 
   // map of regions to lists of puts/gets/deletes for that region.
-  public Map<byte[], List<Action<R>>> actions =
-    new TreeMap<byte[], List<Action<R>>>(Bytes.BYTES_COMPARATOR);
+  protected Map<byte[], List<Action<R>>> actions = new 
TreeMap<>(Bytes.BYTES_COMPARATOR);
 
   private long nonceGroup = HConstants.NO_NONCE;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 8d63105..88e4e22 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -100,7 +100,7 @@ class MultiServerCallable<R> extends 
RegionServerCallable<MultiResponse> {
       final List<Action<R>> actions = e.getValue();
       regionActionBuilder.clear();
       regionActionBuilder.setRegion(RequestConverter.buildRegionSpecifier(
-        HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName) );
+          HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName));
 
 
       if (this.cellBlock) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 28284e5..665c59c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -192,7 +192,7 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
       }
       // add details for each cell
       for (Cell cell: entry.getValue()) {
-        if (--maxCols <= 0 ) {
+        if (--maxCols <= 0) {
           continue;
         }
         Map<String, Object> cellMap = cellToStringMap(cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
index dc2cb7c..9adcb6f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
@@ -25,13 +25,16 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
  * Get instance of configured Registry.
  */
 @InterfaceAudience.Private
-class RegistryFactory {
+final class RegistryFactory {
+
+  private RegistryFactory() {}
+
   /**
    * @return The cluster registry implementation to use.
    * @throws IOException
    */
   static Registry getRegistry(final Connection connection)
-  throws IOException {
+      throws IOException {
     String registryClass = 
connection.getConfiguration().get("hbase.client.registry.impl",
       ZooKeeperRegistry.class.getName());
     Registry registry = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index 08d9b80..faef0d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -57,7 +57,8 @@ import org.apache.hadoop.hbase.util.Bytes;
  * To get a mapping of qualifiers to latest values for an individual family use
  * {@link #getFamilyMap(byte[])}.<p>
  *
- * To get the latest value for a specific family and qualifier use {@link 
#getValue(byte[], byte[])}.
+ * To get the latest value for a specific family and qualifier use
+ * {@link #getValue(byte[], byte[])}.
  *
  * A Result is backed by an array of {@link Cell} objects, each representing
  * an HBase cell defined by the row, family, qualifier, timestamp, and 
value.<p>
@@ -83,7 +84,8 @@ public class Result implements CellScannable, CellScanner {
   // that this is where we cache row if we're ever asked for it.
   private transient byte [] row = null;
   // Ditto for familyMap.  It can be composed on fly from passed in kvs.
-  private transient NavigableMap<byte[], NavigableMap<byte[], 
NavigableMap<Long, byte[]>>> familyMap = null;
+  private transient NavigableMap<byte[], NavigableMap<byte[], 
NavigableMap<Long, byte[]>>>
+      familyMap = null;
 
   private static ThreadLocal<byte[]> localBuffer = new ThreadLocal<byte[]>();
   private static final int PAD_WIDTH = 128;
@@ -99,9 +101,9 @@ public class Result implements CellScannable, CellScanner {
 
   /**
    * Creates an empty Result w/ no KeyValue payload; returns null if you call 
{@link #rawCells()}.
-   * Use this to represent no results if <code>null</code> won't do or in old 
'mapred' as oppposed to 'mapreduce' package
-   * MapReduce where you need to overwrite a Result
-   * instance with a {@link #copyFrom(Result)} call.
+   * Use this to represent no results if {@code null} won't do or in old 
'mapred' as opposed
+   * to 'mapreduce' package MapReduce where you need to overwrite a Result 
instance with a
+   * {@link #copyFrom(Result)} call.
    */
   public Result() {
     super();
@@ -157,7 +159,9 @@ public class Result implements CellScannable, CellScanner {
    */
   public byte [] getRow() {
     if (this.row == null) {
-      this.row = this.cells == null || this.cells.length == 0? null: 
CellUtil.cloneRow(this.cells[0]);
+      this.row = (this.cells == null || this.cells.length == 0) ?
+          null :
+          CellUtil.cloneRow(this.cells[0]);
     }
     return this.row;
   }
@@ -225,7 +229,7 @@ public class Result implements CellScannable, CellScanner {
       return result; // cant find it
     }
 
-    for (int i = pos ; i < kvs.length ; i++ ) {
+    for (int i = pos; i < kvs.length; i++) {
       if (CellUtil.matchingColumn(kvs[i], family,qualifier)) {
         result.add(kvs[i]);
       } else {
@@ -569,20 +573,18 @@ public class Result implements CellScannable, CellScanner 
{
     if(isEmpty()) {
       return null;
     }
-    this.familyMap = new TreeMap<byte[], NavigableMap<byte[], 
NavigableMap<Long, byte[]>>>(Bytes.BYTES_COMPARATOR);
+    this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for(Cell kv : this.cells) {
       byte [] family = CellUtil.cloneFamily(kv);
-      NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap =
-        familyMap.get(family);
+      NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap = 
familyMap.get(family);
       if(columnMap == null) {
-        columnMap = new TreeMap<byte[], NavigableMap<Long, byte[]>>
-          (Bytes.BYTES_COMPARATOR);
+        columnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
         familyMap.put(family, columnMap);
       }
       byte [] qualifier = CellUtil.cloneQualifier(kv);
       NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
       if(versionMap == null) {
-        versionMap = new TreeMap<Long, byte[]>(new Comparator<Long>() {
+        versionMap = new TreeMap<>(new Comparator<Long>() {
           @Override
           public int compare(Long l1, Long l2) {
             return l2.compareTo(l1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
index 650b5a3d..6a32e04 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
@@ -88,7 +88,7 @@ extends RetriesExhaustedException {
 
     // If all of the exceptions are DNRIOE not exception
     for (Throwable t : exceptions) {
-      if ( !(t instanceof DoNotRetryIOException)) {
+      if (!(t instanceof DoNotRetryIOException)) {
         res = true;
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
index e7c1acb..4a57adf 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
@@ -72,7 +72,8 @@ public class ReversedScannerCallable extends ScannerCallable {
    * @param replicaId the replica id
    */
   public ReversedScannerCallable(ClusterConnection connection, TableName 
tableName, Scan scan,
-      ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory 
rpcFactory, int replicaId) {
+      ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory 
rpcFactory,
+      int replicaId) {
     super(connection, tableName, scan, scanMetrics, rpcFactory, replicaId);
     this.locateStartRow = locateStartRow;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 22f98a3..6d5bb9e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -99,7 +99,7 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
    * @param rpcControllerFactory factory to use when creating 
    *        {@link com.google.protobuf.RpcController}
    */
-  public ScannerCallable (ClusterConnection connection, TableName tableName, 
Scan scan,
+  public ScannerCallable(ClusterConnection connection, TableName tableName, 
Scan scan,
       ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {
     this(connection, tableName, scan, scanMetrics, rpcControllerFactory, 0);
   }
@@ -111,7 +111,7 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
    * @param scanMetrics
    * @param id the replicaId
    */
-  public ScannerCallable (ClusterConnection connection, TableName tableName, 
Scan scan,
+  public ScannerCallable(ClusterConnection connection, TableName tableName, 
Scan scan,
       ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int 
id) {
     super(connection, tableName, scan.getStartRow());
     this.id = id;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
index 42da0b3..d03ecf6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java
@@ -25,7 +25,6 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 
-import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
 /**
@@ -50,7 +49,7 @@ public class ServerStatisticTracker {
         ServerStatistics old = stats.putIfAbsent(server, stat);
         if (old != null) {
           stat = old;
-       }
+        }
       }
     }
     stat.update(region, currentStats);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
index 55a81d6..e1eb755 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
@@ -121,12 +121,4 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
-
-//  /**
-//   * @see 
org.apache.hadoop.hbase.HTableDescriptor#addIndex(org.apache.hadoop.hbase.client.tableindexed.IndexSpecification)
-//   */
-//  @Override
-//  public void addIndex(IndexSpecification index) {
-//    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-//  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9bdb81f0/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index eab4a8a..848bd567 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -25,8 +25,11 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost.Environment;
@@ -53,7 +56,9 @@ import com.google.protobuf.ServiceException;
  * which attempt to use objects and methods outside the Environment
  * sandbox.
  */
-public class HTableWrapper implements HTableInterface {
[email protected](HBaseInterfaceAudience.COPROC)
[email protected]
+public final class HTableWrapper implements HTableInterface {
 
   private TableName tableName;
   private final Table table;
@@ -112,6 +117,10 @@ public class HTableWrapper implements HTableInterface {
     }
   }
 
+  /**
+   * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Use
+   * {@link #setAutoFlushTo(boolean)}} instead.
+   */
   @Deprecated
   public Result getRowOrBefore(byte[] row, byte[] family)
       throws IOException {
@@ -135,6 +144,9 @@ public class HTableWrapper implements HTableInterface {
     return table.existsAll(gets);
   }
 
+  /**
+   * @deprecated Use {@link #existsAll(java.util.List)}  instead.
+   */
   @Deprecated
   public Boolean[] exists(List<Get> gets) throws IOException {
     // Do convertion.
@@ -268,8 +280,7 @@ public class HTableWrapper implements HTableInterface {
    * {@inheritDoc}
    * @deprecated If any exception is thrown by one of the actions, there is no 
way to
    * retrieve the partially executed results. Use
-   * {@link #batchCallback(List, Object[], 
org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
-   * instead.
+   * {@link #batchCallback(List, Object[], Batch.Callback)} instead.
    */
   @Deprecated
   @Override

Reply via email to