Repository: hbase
Updated Branches:
  refs/heads/branch-2 2d5065bc7 -> 8b3ae58e1


HBASE-20043 ITBLL fails against hadoop3

Fix MoveRandomRegionOfTableAction. It depended on old AM behavior.
Make it do an explicit move, as required by AMv3; without the explicit
move it was just closing the region, which made the test fail.
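
The gist of the fix, as a minimal sketch (not the committed code; the class
name is invented and the import paths are approximate, but the Admin calls
mirror the MoveRegionsOfTableAction helper in the diff below):

  import java.io.IOException;
  import java.util.Collection;
  import java.util.EnumSet;

  import org.apache.commons.lang3.RandomUtils;
  import org.apache.hadoop.hbase.ClusterMetrics.Option;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class ExplicitMoveSketch {
    // Under AMv3 a bare admin.unassign() only closes the region; the chaos
    // action must name an explicit destination so the Master reopens it.
    static void moveToRandomLiveServer(Admin admin, RegionInfo region) throws IOException {
      Collection<ServerName> live =
          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
      ServerName[] servers = live.toArray(new ServerName[live.size()]);
      ServerName dest = servers[RandomUtils.nextInt(0, servers.length)];
      admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(dest.getServerName()));
    }
  }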

Fix pom so hadoop3 profile specifies a different netty3 version.

Bunch of logging format changes that came of trying to read the spew
from this test.
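
Most of those changes move from string concatenation to parameterized
slf4j-style messages and render byte sizes with StringUtils.byteDesc. An
illustrative before/after (a sketch only; the class and method here are
invented, the messages are taken from the ByteBufferPool hunk below):

  import org.apache.hadoop.util.StringUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class LoggingStyleExample {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingStyleExample.class);

    void report(long bufferSize, long maxPoolSize) {
      // Old style: message built eagerly via concatenation, sizes printed as raw longs.
      LOG.info("Created ByteBufferPool with bufferSize : " + bufferSize
          + " and maxPoolSize : " + maxPoolSize);
      // New style: parameterized message; sizes rendered human-readably ("64 KB", "2.0 G", ...).
      LOG.info("Created with bufferSize={} and maxPoolSize={}",
          StringUtils.byteDesc(bufferSize), StringUtils.byteDesc(maxPoolSize));
    }
  }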


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b3ae58e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b3ae58e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b3ae58e

Branch: refs/heads/branch-2
Commit: 8b3ae58e187f3d731a779528f565c53e40dd7695
Parents: 2d5065b
Author: Michael Stack <[email protected]>
Authored: Fri Feb 23 14:53:52 2018 -0800
Committer: Michael Stack <[email protected]>
Committed: Sat Feb 24 17:29:24 2018 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/io/ByteBufferPool.java  |  5 +--
 .../hadoop/hbase/chaos/actions/Action.java      | 32 +++++++++---------
 .../actions/MoveRandomRegionOfTableAction.java  | 13 ++++----
 .../chaos/actions/MoveRegionsOfTableAction.java | 35 ++++++++++++--------
 .../chaos/monkies/PolicyBasedChaosMonkey.java   |  2 +-
 .../hbase/chaos/policies/PeriodicPolicy.java    |  6 ++--
 .../policies/PeriodicRandomActionPolicy.java    |  3 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  4 +--
 .../hadoop/hbase/io/hfile/CacheConfig.java      |  2 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.java    | 14 ++++----
 .../apache/hadoop/hbase/ipc/NettyRpcServer.java |  4 +--
 .../apache/hadoop/hbase/ipc/RpcExecutor.java    | 10 +++---
 .../org/apache/hadoop/hbase/master/HMaster.java |  1 -
 .../RegionServerProcedureManagerHost.java       |  8 ++---
 .../hbase/regionserver/CompactingMemStore.java  |  5 +--
 .../hbase/regionserver/CompactionPipeline.java  |  2 +-
 .../hadoop/hbase/regionserver/HRegion.java      |  3 +-
 .../hbase/regionserver/HRegionServer.java       |  4 +--
 .../hadoop/hbase/regionserver/HStore.java       |  2 +-
 .../hbase/regionserver/HeapMemoryManager.java   | 10 +++---
 .../hadoop/hbase/regionserver/Leases.java       |  3 +-
 .../MemStoreCompactionStrategy.java             |  5 ++-
 .../hbase/regionserver/MemStoreCompactor.java   |  3 +-
 .../hadoop/hbase/regionserver/Segment.java      |  2 +-
 .../compactions/CompactionConfiguration.java    |  9 ++---
 .../compactions/ExploringCompactionPolicy.java  |  6 ++--
 .../PressureAwareThroughputController.java      |  2 +-
 .../replication/regionserver/Replication.java   |  2 +-
 pom.xml                                         |  9 +++--
 29 files changed, 108 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java
----------------------------------------------------------------------
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java
index 6c009b3..caca20b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java
@@ -80,8 +80,9 @@ public class ByteBufferPool {
     this.maxPoolSize = maxPoolSize;
     this.directByteBuffer = directByteBuffer;
     // TODO can add initialPoolSize config also and make those many BBs ready 
for use.
-    LOG.info("Created ByteBufferPool with bufferSize : " + bufferSize + " and 
maxPoolSize : "
-        + maxPoolSize);
+    LOG.info("Created with bufferSize={} and maxPoolSize={}",
+        org.apache.hadoop.util.StringUtils.byteDesc(bufferSize),
+        org.apache.hadoop.util.StringUtils.byteDesc(maxPoolSize));
     this.count = new AtomicInteger(0);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
----------------------------------------------------------------------
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
index ae8cd1f..2b2c1b8 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
@@ -138,63 +138,63 @@ public class Action {
   }
 
   protected void killMaster(ServerName server) throws IOException {
-    LOG.info("Killing master:" + server);
+    LOG.info("Killing master " + server);
     cluster.killMaster(server);
     cluster.waitForMasterToStop(server, killMasterTimeout);
-    LOG.info("Killed master server:" + server);
+    LOG.info("Killed master " + server);
   }
 
   protected void startMaster(ServerName server) throws IOException {
-    LOG.info("Starting master:" + server.getHostname());
+    LOG.info("Starting master " + server.getHostname());
     cluster.startMaster(server.getHostname(), server.getPort());
     cluster.waitForActiveAndReadyMaster(startMasterTimeout);
-    LOG.info("Started master: " + server);
+    LOG.info("Started master " + server.getHostname());
   }
 
   protected void killRs(ServerName server) throws IOException {
-    LOG.info("Killing region server:" + server);
+    LOG.info("Killing regionserver " + server);
     cluster.killRegionServer(server);
     cluster.waitForRegionServerToStop(server, killRsTimeout);
-    LOG.info("Killed region server:" + server + ". Reported num of rs:"
+    LOG.info("Killed regionserver " + server + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void startRs(ServerName server) throws IOException {
-    LOG.info("Starting region server:" + server.getHostname());
+    LOG.info("Starting regionserver " + server.getAddress());
     cluster.startRegionServer(server.getHostname(), server.getPort());
     cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), 
startRsTimeout);
-    LOG.info("Started region server:" + server + ". Reported num of rs:"
+    LOG.info("Started regionserver " + server.getAddress() + ". Reported num 
of rs:"
       + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void killZKNode(ServerName server) throws IOException {
-    LOG.info("Killing zookeeper node:" + server);
+    LOG.info("Killing zookeeper node " + server);
     cluster.killZkNode(server);
     cluster.waitForZkNodeToStop(server, killZkNodeTimeout);
-    LOG.info("Killed zookeeper node:" + server + ". Reported num of rs:"
+    LOG.info("Killed zookeeper node " + server + ". Reported num of rs:"
       + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void startZKNode(ServerName server) throws IOException {
-    LOG.info("Starting zookeeper node:" + server.getHostname());
+    LOG.info("Starting zookeeper node " + server.getHostname());
     cluster.startZkNode(server.getHostname(), server.getPort());
     cluster.waitForZkNodeToStart(server, startZkNodeTimeout);
-    LOG.info("Started zookeeper node:" + server);
+    LOG.info("Started zookeeper node " + server);
   }
 
   protected void killDataNode(ServerName server) throws IOException {
-    LOG.info("Killing datanode:" + server);
+    LOG.info("Killing datanode " + server);
     cluster.killDataNode(server);
     cluster.waitForDataNodeToStop(server, killDataNodeTimeout);
-    LOG.info("Killed datanode:" + server + ". Reported num of rs:"
+    LOG.info("Killed datanode " + server + ". Reported num of rs:"
       + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void startDataNode(ServerName server) throws IOException {
-    LOG.info("Starting datanode:" + server.getHostname());
+    LOG.info("Starting datanode " + server.getHostname());
     cluster.startDataNode(server);
     cluster.waitForDataNodeToStart(server, startDataNodeTimeout);
-    LOG.info("Started datanode:" + server);
+    LOG.info("Started datanode " + server);
   }
 
   protected void unbalanceRegions(ClusterMetrics clusterStatus,

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
----------------------------------------------------------------------
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
index bb8f244..09bfe21 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java
@@ -21,10 +21,10 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 
 /**
 * Action that tries to move a random region of a table.
@@ -52,16 +52,17 @@ public class MoveRandomRegionOfTableAction extends Action {
     Admin admin = util.getAdmin();
 
     LOG.info("Performing action: Move random region of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableName);
+    List<RegionInfo> regions = admin.getRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to move");
       return;
     }
 
-    HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
-      regions.toArray(new HRegionInfo[regions.size()]));
-    LOG.debug("Unassigning region " + region.getRegionNameAsString());
-    admin.unassign(region.getRegionName(), false);
+    RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
+      regions.toArray(new RegionInfo[regions.size()]));
+    LOG.debug("Move random region {}", region.getRegionNameAsString());
+    // Use facility over in MoveRegionsOfTableAction...
+    MoveRegionsOfTableAction.moveRegion(admin, 
MoveRegionsOfTableAction.getServers(admin), region);
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
----------------------------------------------------------------------
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
index 08958e8..3496b9a 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hbase.chaos.actions;
 
+import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -56,14 +58,12 @@ public class MoveRegionsOfTableAction extends Action {
     }
 
     Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
-    Collection<ServerName> serversList =
-        
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
-    ServerName[] servers = serversList.toArray(new 
ServerName[serversList.size()]);
+    ServerName[] servers = getServers(admin);
 
-    LOG.info("Performing action: Move regions of table " + tableName);
+    LOG.info("Performing action: Move regions of table {}", tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to move");
+      LOG.info("Table {} doesn't have regions to move", tableName);
       return;
     }
 
@@ -77,14 +77,7 @@ public class MoveRegionsOfTableAction extends Action {
         return;
       }
 
-      try {
-        String destServerName =
-          servers[RandomUtils.nextInt(0, servers.length)].getServerName();
-        LOG.debug("Moving " + regionInfo.getRegionNameAsString() + " to " + 
destServerName);
-        admin.move(regionInfo.getEncodedNameAsBytes(), 
Bytes.toBytes(destServerName));
-      } catch (Exception ex) {
-        LOG.warn("Move failed, might be caused by other chaos: " + 
ex.getMessage());
-      }
+      moveRegion(admin, servers, regionInfo);
       if (sleepTime > 0) {
         Thread.sleep(sleepTime);
       }
@@ -96,4 +89,20 @@ public class MoveRegionsOfTableAction extends Action {
       }
     }
   }
+
+  static ServerName [] getServers(Admin admin) throws IOException {
+    Collection<ServerName> serversList =
+        
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
+    return serversList.toArray(new ServerName[serversList.size()]);
+  }
+
+  static void moveRegion(Admin admin, ServerName [] servers, RegionInfo 
regionInfo) {
+    try {
+      String destServerName = servers[RandomUtils.nextInt(0, 
servers.length)].getServerName();
+      LOG.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), 
destServerName);
+      admin.move(regionInfo.getEncodedNameAsBytes(), 
Bytes.toBytes(destServerName));
+    } catch (Exception ex) {
+      LOG.warn("Move failed, might be caused by other chaos: {}", 
ex.getMessage());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
----------------------------------------------------------------------
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
index a49f541..70636dd 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java
@@ -113,7 +113,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
 
     for (int i=0; i<policies.length; i++) {
       policies[i].init(new Policy.PolicyContext(this.util));
-      Thread monkeyThread = new Thread(policies[i], "ChaosMonkeyThread");
+      Thread monkeyThread = new Thread(policies[i], "ChaosMonkey");
       monkeyThread.start();
       monkeyThreads[i] = monkeyThread;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
----------------------------------------------------------------------
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
index 3b6908d..9ffef16 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
@@ -33,7 +33,7 @@ public abstract class PeriodicPolicy extends Policy {
   public void run() {
     // Add some jitter.
     int jitter = RandomUtils.nextInt(0, (int) periodMs);
-    LOG.info("Sleeping for " + jitter + " to add jitter");
+    LOG.info("Sleeping for {} ms to add jitter", jitter);
     Threads.sleep(jitter);
 
     while (!isStopped()) {
@@ -43,7 +43,7 @@ public abstract class PeriodicPolicy extends Policy {
       if (isStopped()) return;
       long sleepTime = periodMs - (System.currentTimeMillis() - start);
       if (sleepTime > 0) {
-        LOG.info("Sleeping for: " + sleepTime);
+        LOG.info("Sleeping for {} ms", sleepTime);
         Threads.sleep(sleepTime);
       }
     }
@@ -54,6 +54,6 @@ public abstract class PeriodicPolicy extends Policy {
   @Override
   public void init(PolicyContext context) throws Exception {
     super.init(context);
-    LOG.info("Using ChaosMonkey Policy: " + this.getClass() + ", period: " + 
periodMs);
+    LOG.info("Using ChaosMonkey Policy {}, period={} ms", this.getClass(), 
periodMs);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java
----------------------------------------------------------------------
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java
index b11aa32..a0dc89e 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java
@@ -58,8 +58,7 @@ public class PeriodicRandomActionPolicy extends 
PeriodicPolicy {
     try {
       action.perform();
     } catch (Exception ex) {
-      LOG.warn("Exception occurred during performing action: "
-          + StringUtils.stringifyException(ex));
+      LOG.warn("Exception performing action: " + 
StringUtils.stringifyException(ex));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
----------------------------------------------------------------------
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 861e3b2..6238e10 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -94,8 +94,8 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote 
extends Comparable
       return false;
     }
 
-    LOG.info("Started, threads=" + this.corePoolSize +
-      ", queueMaxSize=" + this.queueMaxSize + ", operationDelay=" + 
this.operationDelay);
+    LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), 
queueMaxSize={}, " +
+        "operationDelay={}", this.corePoolSize, this.queueMaxSize, 
this.operationDelay);
 
     // Create the timeout executor
     timeoutExecutor = new TimeoutExecutorThread();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 969760f..ac1af91 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -550,7 +550,7 @@ public class CacheConfig {
     }
     if (blockCacheDisabled) return null;
     int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, 
HConstants.DEFAULT_BLOCKSIZE);
-    LOG.info("Allocating On heap LruBlockCache size=" +
+    LOG.info("Allocating onheap LruBlockCache size=" +
       StringUtils.byteDesc(cacheSize) + ", blockSize=" + 
StringUtils.byteDesc(blockSize));
     ONHEAP_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c);
     return ONHEAP_CACHE_INSTANCE;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index d26b90a..2b63e54 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -731,15 +731,15 @@ public class LruBlockCache implements 
ResizableBlockCache, HeapSize {
   public String toString() {
     return MoreObjects.toStringHelper(this)
       .add("blockCount", getBlockCount())
-      .add("currentSize", getCurrentSize())
-      .add("freeSize", getFreeSize())
-      .add("maxSize", getMaxSize())
-      .add("heapSize", heapSize())
-      .add("minSize", minSize())
+      .add("currentSize", StringUtils.byteDesc(getCurrentSize()))
+      .add("freeSize", StringUtils.byteDesc(getFreeSize()))
+      .add("maxSize", StringUtils.byteDesc(getMaxSize()))
+      .add("heapSize", StringUtils.byteDesc(heapSize()))
+      .add("minSize", StringUtils.byteDesc(minSize()))
       .add("minFactor", minFactor)
-      .add("multiSize", multiSize())
+      .add("multiSize", StringUtils.byteDesc(multiSize()))
       .add("multiFactor", multiFactor)
-      .add("singleSize", singleSize())
+      .add("singleSize", StringUtils.byteDesc(singleSize()))
       .add("singleFactor", singleFactor)
       .toString();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
index 47826ae..fb2a8eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
@@ -106,7 +106,7 @@ public class NettyRpcServer extends RpcServer {
         });
     try {
       serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
-      LOG.info("NettyRpcServer bind to address=" + 
serverChannel.localAddress());
+      LOG.info("Bind to {}", serverChannel.localAddress());
     } catch (InterruptedException e) {
       throw new InterruptedIOException(e.getMessage());
     }
@@ -140,7 +140,7 @@ public class NettyRpcServer extends RpcServer {
     if (!running) {
       return;
     }
-    LOG.info("Stopping server on " + this.bindAddress.getPort());
+    LOG.info("Stopping server on " + this.serverChannel.localAddress());
     if (authTokenSecretMgr != null) {
       authTokenSecretMgr.stop();
       authTokenSecretMgr = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
index e49f576..7470758 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
@@ -145,9 +145,9 @@ public abstract class RpcExecutor {
       queueClass = LinkedBlockingQueue.class;
     }
 
-    LOG.info("RpcExecutor " + this.name + " using " + this.queueClass
-        + " as call queue; numCallQueues=" + this.numCallQueues + "; 
maxQueueLength="
-        + maxQueueLength + "; handlerCount=" + this.handlerCount);
+    LOG.info("Instantiated {} with queueClass={}; " +
+        "numCallQueues={}, maxQueueLength={}, handlerCount={}",
+        this.name, this.queueClass, this.numCallQueues, maxQueueLength, 
this.handlerCount);
   }
 
   protected int computeNumCallQueues(final int handlerCount, final float 
callQueuesHandlersFactor) {
@@ -260,8 +260,8 @@ public abstract class RpcExecutor {
       handler.start();
       handlers.add(handler);
     }
-    LOG.debug("Started " + handlers.size() + " " + threadPrefix +
-        " handlers, queues=" + qsize + ", port=" + port);
+    LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, 
port={}",
+        handlers.size(), threadPrefix, qsize, port);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8e2aa32..0c168ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3577,7 +3577,6 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   @Override
   public boolean recoverMeta() throws IOException {
     ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
-    LOG.info("Running RecoverMetaProcedure to ensure proper hbase:meta 
deploy.");
     procedureExecutor.submitProcedure(new RecoverMetaProcedure(null, true, 
latch));
     latch.await();
     LOG.info("hbase:meta deployed at=" +

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
index 5cb2529..4c01eb8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
@@ -41,17 +41,17 @@ public class RegionServerProcedureManagerHost extends
 
   public void initialize(RegionServerServices rss) throws KeeperException {
     for (RegionServerProcedureManager proc : procedures) {
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is 
initializing");
+      LOG.debug("Procedure {} initializing", proc.getProcedureSignature());
       proc.initialize(rss);
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is 
initialized");
+      LOG.debug("Procedure {} initialized", proc.getProcedureSignature());
     }
   }
 
   public void start() {
     for (RegionServerProcedureManager proc : procedures) {
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is starting");
+      LOG.debug("Procedure {} starting", proc.getProcedureSignature());
       proc.start();
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is started");
+      LOG.debug("Procedure {} started", proc.getProcedureSignature());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index bcecdc7..44b40eb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+import org.apache.hadoop.util.StringUtils;
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -144,8 +145,8 @@ public class CompactingMemStore extends AbstractMemStore {
           IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
     }
     inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
-    LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize
-        + " and immutable segments index to be of type " + indexType);
+    LOG.info("Setting in-memory flush size threshold to {} and immutable 
segments index to type={}",
+        StringUtils.byteDesc(inmemoryFlushSize), indexType);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 43c1f15..6e53cfc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -125,7 +125,7 @@ public class CompactionPipeline {
         return false;
       }
       suffix = versionedList.getStoreSegments();
-      LOG.debug("Swapping pipeline suffix; before={}, new segement={}",
+      LOG.debug("Swapping pipeline suffix; before={}, new segment={}",
           versionedList.getStoreSegments().size(), segment);
       swapSuffix(suffix, segment, closeSuffix);
       readOnlyCopy = new LinkedList<>(pipeline);

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6e1aceb..414bc31 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -955,8 +955,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
       nextSeqid++;
     }
 
-    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-      "; next sequenceid=" + nextSeqid);
+    LOG.info("Opened {}; next sequenceid={}", 
this.getRegionInfo().getShortNameToLog(), nextSeqid);
 
     // A region can be reopened if failed a split; reset flags
     this.closing.set(false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f26e2cb..593c08d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1140,9 +1140,7 @@ public class HRegionServer extends HasThread implements
     if (this.zooKeeper != null) {
       this.zooKeeper.close();
     }
-    LOG.info("stopping server " + this.serverName + "; zookeeper connection 
closed.");
-
-    LOG.info(Thread.currentThread().getName() + " exiting");
+    LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection 
closed.");
   }
 
   private boolean containsMetaTableRegions() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 220881d..68a057a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -292,7 +292,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
         this.memstore = ReflectionUtils.newInstance(clz, new Object[] { conf, 
this.comparator, this,
             this.getHRegion().getRegionServicesForStores(), inMemoryCompaction 
});
     }
-    LOG.info("Memstore class name is {}", className);
+    LOG.debug("Memstore type={}", className);
     this.offPeakHours = OffPeakHours.getInstance(conf);
 
     // Setting up cache configuration for this family

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index 2daa5d2..c32fce2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -207,10 +207,10 @@ public class HeapMemoryManager {
   }
 
   public void start(ChoreService service) {
-      LOG.info("Starting HeapMemoryTuner chore.");
-      this.heapMemTunerChore = new HeapMemoryTunerChore();
-      service.scheduleChore(heapMemTunerChore);
-      if (tunerOn) {
+    LOG.info("Starting, tuneOn={}", this.tunerOn);
+    this.heapMemTunerChore = new HeapMemoryTunerChore();
+    service.scheduleChore(heapMemTunerChore);
+    if (tunerOn) {
       // Register HeapMemoryTuner as a memstore flush listener
       memStoreFlusher.registerFlushRequestListener(heapMemTunerChore);
     }
@@ -218,7 +218,7 @@ public class HeapMemoryManager {
 
   public void stop() {
     // The thread is Daemon. Just interrupting the ongoing process.
-    LOG.info("Stopping HeapMemoryTuner chore.");
+    LOG.info("Stopping");
     this.heapMemTunerChore.cancel(true);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
index f7ee4ef..0afa381 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
@@ -144,10 +144,9 @@ public class Leases extends HasThread {
    * without any cancellation calls.
    */
   public void close() {
-    LOG.info(Thread.currentThread().getName() + " closing leases");
     this.stopRequested = true;
     leases.clear();
-    LOG.info(Thread.currentThread().getName() + " closed leases");
+    LOG.info("Closed leases");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
index d4aafed..fbb5f75 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
@@ -85,13 +85,12 @@ public abstract class MemStoreCompactionStrategy {
     int numOfSegments = versionedList.getNumOfSegments();
     if (numOfSegments > pipelineThreshold) {
       // to avoid too many segments, merge now
-      LOG.debug("{} in-memory compaction of {}; merging {} segments",
-          strategy, cfName, numOfSegments);
+      LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments);
       return getMergingAction();
     }
 
     // just flatten a segment
-    LOG.debug("{} in-memory compaction of {}; flattening a segment", strategy, 
cfName);
+    LOG.debug("{} {}; flattening a segment", strategy, cfName);
     return getFlattenAction();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index a8c3362..5c908e5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -92,7 +92,8 @@ public class MemStoreCompactor {
     // get a snapshot of the list of the segments from the pipeline,
     // this local copy of the list is marked with specific version
     versionedList = compactingMemStore.getImmutableSegments();
-    LOG.debug("Starting In-Memory Compaction of {}",
+    LOG.debug("Starting on {}/{}",
+        
compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
         compactingMemStore.getStore().getColumnFamilyName());
     HStore store = compactingMemStore.getStore();
     RegionCoprocessorHost cpHost = store.getCoprocessorHost();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index 66a2ad5..70074bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -413,7 +413,7 @@ public abstract class Segment {
 
   @Override
   public String toString() {
-    String res = "Type=" + this.getClass().getSimpleName() + ", ";
+    String res = "type=" + this.getClass().getSimpleName() + ", ";
     res += "empty=" + (isEmpty()? "yes": "no") + ", ";
     res += "cellCount=" + getCellsCount() + ", ";
     res += "cellSize=" + keySize() + ", ";

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index 212eb04..0432641 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.compactions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -149,14 +150,14 @@ public class CompactionConfiguration {
   @Override
   public String toString() {
     return String.format(
-      "size [%d, %d, %d); files [%d, %d); ratio %f; off-peak ratio %f; 
throttle point %d;"
+      "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; 
throttle point %d;"
       + " major period %d, major jitter %f, min locality to compact %f;"
       + " tiered compaction: max_age %d, incoming window min %d,"
       + " compaction policy for tiered window %s, single output for minor %b,"
       + " compaction window factory %s",
-      minCompactSize,
-      maxCompactSize,
-      offPeakMaxCompactSize,
+      StringUtils.byteDesc(minCompactSize),
+      StringUtils.byteDesc(maxCompactSize),
+      StringUtils.byteDesc(offPeakMaxCompactSize),
       minFilesToCompact,
       maxFilesToCompact,
       compactionRatio,

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
index d9d10d9..ce42530 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
@@ -118,9 +118,9 @@ public class ExploringCompactionPolicy extends 
RatioBasedCompactionPolicy {
           + " files of size "+ smallestSize + " because the store might be 
stuck");
       return new ArrayList<>(smallest);
     }
-    LOG.debug("Exploring compaction algorithm has selected " + 
bestSelection.size()
-        + " files of size " + bestSize + " starting at candidate #" + 
bestStart +
-        " after considering " + opts + " permutations with " + optsInRatio + " 
in ratio");
+    LOG.debug("Exploring compaction algorithm has selected {}  files of size 
{} starting at " +
+      "candidate #{} after considering {} permutations with {} in ratio", 
bestSelection.size(),
+      bestSize, bestSize, opts, optsInRatio);
     return new ArrayList<>(bestSelection);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
index a1bd21b..27c25ea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
@@ -90,7 +90,7 @@ public abstract class PressureAwareThroughputController 
extends Configured imple
     if (speed >= 1E15) { // large enough to say it is unlimited
       return "unlimited";
     } else {
-      return String.format("%.2f MB/sec", speed / 1024 / 1024);
+      return String.format("%.2f MB/second", speed / 1024 / 1024);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index ad12c66..c259890 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -132,7 +132,7 @@ public class Replication implements 
ReplicationSourceService, ReplicationSinkSer
     }
     this.statsThreadPeriod =
         this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
-    LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod);
+    LOG.debug("Replication stats-in-log period={} seconds",  
this.statsThreadPeriod);
     this.replicationLoad = new ReplicationLoad();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b3ae58e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index a9df16f..2455253 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1344,9 +1344,6 @@
     <clover.version>4.0.3</clover.version>
     <jamon-runtime.version>2.4.1</jamon-runtime.version>
     <jettison.version>1.3.8</jettison.version>
-    <!--This property is for hadoops netty. HBase netty
-         comes in via hbase-thirdparty hbase-shaded-netty-->
-    <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
     <!--Make sure these joni/jcodings are compatible with the versions used by 
jruby-->
     <joni.version>2.1.11</joni.version>
     <jcodings.version>1.0.18</jcodings.version>
@@ -2321,6 +2318,9 @@
         <hadoop.version>${hadoop-two.version}</hadoop.version>
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+        <!--This property is for hadoops netty. HBase netty
+             comes in via hbase-thirdparty hbase-shaded-netty-->
+        <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
       </properties>
       <dependencyManagement>
         <dependencies>
@@ -2595,6 +2595,9 @@
         <!--Use this compat module for now. TODO: Make h3 one if we need one-->
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+        <!--This property is for hadoops netty. HBase netty
+             comes in via hbase-thirdparty hbase-shaded-netty-->
+        <netty.hadoop.version>3.10.5.Final</netty.hadoop.version>
       </properties>
      <dependencyManagement>
        <dependencies>
