Repository: hbase
Updated Branches:
  refs/heads/master aaa90d806 -> bedf849d8


HBASE-20213 [LOGGING] Aligning formatting and logging less (compactions,
in-memory compactions)

Log less. Log using the same format used elsewhere in the log.

Align logs in HFileArchiver with how we format elsewhere. Removed
redundant 'region' qualifiers and tightened up the emissions so the
long lines are easier to read.
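
Most of the edits below follow one pattern: string-concatenated log
calls (often wrapped in an isDebugEnabled()/isTraceEnabled() guard)
become SLF4J parameterized calls. A minimal sketch of the before/after;
the class and variables are illustrative, not part of this patch:

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class LoggingStyleExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(LoggingStyleExample.class);

    void archive(String file, long size) {
      // Old style: the message string is built even when DEBUG is off,
      // so callers guarded the call.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Archiving: " + file + ", size=" + size);
      }
      // New style: the {} placeholders are only rendered when DEBUG is
      // enabled, so both the guard and the concatenation go away.
      LOG.debug("Archiving {}, size={}", file, size);
    }
  }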

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
 Add a label for each of the chunk creators we make (I was confused by
two chunk creator stats emissions in the log file -- didn't know that
one was for data and the other for index).
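
 With the label, the two periodic pool stat lines can be told apart at a
glance. Illustrative output only -- the sizes and counts below are made
up, not taken from a real log:

   data Stats (chunk size=2097152): current pool size=10, created chunk count=12, reused chunk count=40, reuseRatio=76.92%
   index Stats (chunk size=209715): current pool size=2, created chunk count=3, reused chunk count=9, reuseRatio=75.00%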

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
 Formatting. Log less.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
 Make the emissions in here trace-level. With more than a few regions,
the log fills up with this stuff.
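
 If those per-store emissions are still wanted, trace can be re-enabled
for just this class; assuming the stock log4j.properties HBase ships
(property sketched here, adjust to your logging setup):

   log4j.logger.org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy=TRACE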


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bedf849d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bedf849d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bedf849d

Branch: refs/heads/master
Commit: bedf849d83ef2a6b092a57f492ab5b08a7d71b0c
Parents: aaa90d8
Author: Michael Stack <[email protected]>
Authored: Thu Mar 15 20:18:46 2018 -0700
Committer: Michael Stack <[email protected]>
Committed: Fri Mar 16 13:16:49 2018 -0700

----------------------------------------------------------------------
 .../hbase/client/AsyncRequestFutureImpl.java    | 19 +++---
 .../hadoop/hbase/ipc/NettyRpcConnection.java    |  4 +-
 .../hbase/procedure2/ProcedureExecutor.java     |  3 +-
 .../replication/ZKReplicationQueueStorage.java  | 70 +++++++-------------
 .../hadoop/hbase/backup/HFileArchiver.java      | 61 ++++++++---------
 .../org/apache/hadoop/hbase/master/HMaster.java |  4 +-
 .../master/balancer/StochasticLoadBalancer.java |  2 +-
 .../procedure/MasterProcedureScheduler.java     |  2 +-
 .../hadoop/hbase/regionserver/ChunkCreator.java | 46 +++++++------
 .../hadoop/hbase/regionserver/CompactSplit.java | 16 ++---
 .../hbase/regionserver/CompactingMemStore.java  |  5 +-
 .../hadoop/hbase/regionserver/HRegion.java      |  4 +-
 .../MemStoreCompactionStrategy.java             |  8 +--
 .../hbase/regionserver/MemStoreCompactor.java   |  6 +-
 .../hadoop/hbase/regionserver/StoreScanner.java |  6 +-
 .../regionserver/compactions/Compactor.java     | 17 +++--
 16 files changed, 122 insertions(+), 151 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index a8b8ebf..c3cb866 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -97,7 +97,7 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
         try {
           done = waitUntilDone(startTime * 1000L + 
asyncProcess.primaryCallTimeoutMicroseconds);
         } catch (InterruptedException ex) {
-          LOG.error("Replica thread was interrupted - no replica calls: " + 
ex.getMessage());
+          LOG.error("Replica thread interrupted - no replica calls {}", 
ex.getMessage());
           return;
         }
       }
@@ -141,7 +141,7 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
       if (loc == null) return;
       HRegionLocation[] locs = loc.getRegionLocations();
       if (locs.length == 1) {
-        LOG.warn("No replicas found for " + action.getAction());
+        LOG.warn("No replicas found for {}", action.getAction());
         return;
       }
       synchronized (replicaResultLock) {
@@ -222,8 +222,8 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
           return;
         } catch (Throwable t) {
           // This should not happen. Let's log & retry anyway.
-          LOG.error("#" + asyncProcess.id + ", Caught throwable while calling. 
This is unexpected." +
-              " Retrying. Server is " + server + ", tableName=" + tableName, 
t);
+          LOG.error("id=" + asyncProcess.id + ", caught throwable. 
Unexpected." +
+              " Retrying. Server=" + server + ", tableName=" + tableName, t);
           receiveGlobalFailure(multiAction, server, numAttempt, t);
           return;
         }
@@ -239,8 +239,7 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
         }
       } catch (Throwable t) {
         // Something really bad happened. We are on the send thread that will 
now die.
-        LOG.error("Internal AsyncProcess #" + asyncProcess.id + " error for "
-            + tableName + " processing for " + server, t);
+        LOG.error("id=" + asyncProcess.id + " error for " + tableName + " 
processing " + server, t);
         throw new RuntimeException(t);
       } finally {
         asyncProcess.decTaskCounters(multiAction.getRegions(), server);
@@ -550,8 +549,8 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
             if (t instanceof RejectedExecutionException) {
               // This should never happen. But as the pool is provided by the 
end user,
               // let's secure this a little.
-              LOG.warn("#" + asyncProcess.id + ", the task was rejected by the 
pool. This is unexpected." +
-                  " Server is " + server.getServerName(), t);
+              LOG.warn("id=" + asyncProcess.id + ", task rejected by pool. 
Unexpected." +
+                  " Server=" + server.getServerName(), t);
             } else {
               // see #HBASE-14359 for more details
               LOG.warn("Caught unexpected exception/error: ", t);
@@ -659,7 +658,7 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
       try {
         pool.submit(replicaRunnable);
       } catch (RejectedExecutionException ree) {
-        LOG.warn("#" + asyncProcess.id + ", replica task was rejected by the 
pool - no replica calls", ree);
+        LOG.warn("id=" + asyncProcess.id + " replica task rejected by pool; no 
replica calls", ree);
       }
     }
   }
@@ -955,7 +954,7 @@ class AsyncRequestFutureImpl<CResult> implements 
AsyncRequestFuture {
     StringBuilder sb = new StringBuilder();
     sb.append("id=").append(asyncProcess.id).append(", 
table=").append(tableName).append(", ")
         .append("attempt=").append(numAttempt)
-        .append("/").append(asyncProcess.numTries).append(" ");
+        .append("/").append(asyncProcess.numTries).append(", ");
 
     if (failureCount > 0 || error != null){
       sb.append("failed=").append(failureCount).append("ops").append(", last 
exception=").

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index f8a4557..b8620b1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -163,7 +163,7 @@ class NettyRpcConnection extends RpcConnection {
               relogin();
             }
           } catch (IOException e) {
-            LOG.warn("relogin failed", e);
+            LOG.warn("Relogin failed", e);
           }
           synchronized (this) {
             reloginInProgress = false;
@@ -251,7 +251,7 @@ class NettyRpcConnection extends RpcConnection {
   }
 
   private void connect() {
-    LOG.debug("Connecting to {}", remoteId.address);
+    LOG.trace("Connecting to {}", remoteId.address);
 
     this.channel = new 
Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
         .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 19efdc7..e5105a5 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1427,7 +1427,8 @@ public class ProcedureExecutor<TEnvironment> {
    */
   private void execProcedure(final RootProcedureState procStack,
       final Procedure<TEnvironment> procedure) {
-    Preconditions.checkArgument(procedure.getState() == 
ProcedureState.RUNNABLE);
+    Preconditions.checkArgument(procedure.getState() == 
ProcedureState.RUNNABLE,
+        procedure.toString());
 
     // Procedures can suspend themselves. They skip out by throwing a 
ProcedureSuspendedException.
     // The exception is caught below and then we hurry to the exit without 
disturbing state. The

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index fa0ff0e..4a1e780 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -182,7 +182,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
     try {
       ZKUtil.deleteNode(zookeeper, fileNode);
     } catch (NoNodeException e) {
-      LOG.warn(fileNode + " has already been deleted when removing log");
+      LOG.warn("{} already deleted when removing log", fileNode);
     } catch (KeeperException e) {
       throw new ReplicationException("Failed to remove wal from queue 
(serverName=" + serverName +
         ", queueId=" + queueId + ", fileName=" + fileName + ")", e);
@@ -254,8 +254,8 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
     try {
       return ZKUtil.parseWALPositionFrom(bytes);
     } catch (DeserializationException de) {
-      LOG.warn("Failed to parse log position (serverName=" + serverName + ", 
queueId=" + queueId +
-        ", fileName=" + fileName + ")");
+      LOG.warn("Failed parse log position (serverName={}, queueId={}, 
fileName={})",
+          serverName, queueId, fileName);
     }
     // if we can not parse the position, start at the beginning of the wal 
file again
     return 0;
@@ -264,8 +264,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   @Override
   public Pair<String, SortedSet<String>> claimQueue(ServerName 
sourceServerName, String queueId,
       ServerName destServerName) throws ReplicationException {
-    LOG.info(
-      "Atomically moving " + sourceServerName + "/" + queueId + "'s WALs to " 
+ destServerName);
+    LOG.info("Atomically moving {}/{}'s WALs to {}", sourceServerName, 
queueId, destServerName);
     try {
       ZKUtil.createWithParents(zookeeper, getRsNode(destServerName));
     } catch (KeeperException e) {
@@ -280,7 +279,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
       String newQueueId = queueId + "-" + sourceServerName;
       if (CollectionUtils.isEmpty(wals)) {
         ZKUtil.deleteNodeFailSilent(zookeeper, oldQueueNode);
-        LOG.info("Removed " + sourceServerName + "/" + queueId + " since it's 
empty");
+        LOG.info("Removed empty {}/{}", sourceServerName, queueId);
         return new Pair<>(newQueueId, Collections.emptySortedSet());
       }
       String newQueueNode = getQueueNode(destServerName, newQueueId);
@@ -292,9 +291,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
       for (String wal : wals) {
         String oldWalNode = getFileNode(oldQueueNode, wal);
         byte[] logOffset = ZKUtil.getData(this.zookeeper, oldWalNode);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Creating " + wal + " with data " + 
Bytes.toStringBinary(logOffset));
-        }
+        LOG.debug("Creating {} with data {}", wal, 
Bytes.toStringBinary(logOffset));
         String newWalNode = getFileNode(newQueueNode, wal);
         listOfOps.add(ZKUtilOp.createAndFailSilent(newWalNode, logOffset));
         listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalNode));
@@ -303,21 +300,17 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
       // add delete op for peer
       listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldQueueNode));
 
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("The multi list size is: " + listOfOps.size());
-      }
+      LOG.trace("The multi list size is {}", listOfOps.size());
       ZKUtil.multiOrSequential(zookeeper, listOfOps, false);
 
-      LOG.info(
-        "Atomically moved " + sourceServerName + "/" + queueId + "'s WALs to " 
+ destServerName);
+      LOG.info("Atomically moved {}/{}'s WALs to {}", sourceServerName, 
queueId, destServerName);
       return new Pair<>(newQueueId, logQueue);
     } catch (NoNodeException | NodeExistsException | NotEmptyException | 
BadVersionException e) {
       // Multi call failed; it looks like some other regionserver took away 
the logs.
       // These exceptions mean that zk tells us the request can not be execute 
so it is safe to just
       // return a null. For other types of exception should be thrown out to 
notify the upper layer.
-      LOG.info(
-        "Claim queue queueId=" + queueId + " from " + sourceServerName + " to 
" + destServerName +
-          " failed with " + e.toString() + ", maybe someone else has already 
took away the logs");
+      LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone 
else took the log?",
+          queueId,sourceServerName, destServerName, e.toString());
       return null;
     } catch (KeeperException | InterruptedException e) {
       throw new ReplicationException("Claim queue queueId=" + queueId + " from 
" +
@@ -395,7 +388,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
         int v0 = getQueuesZNodeCversion();
         List<ServerName> rss = getListOfReplicators0();
         if (rss.isEmpty()) {
-          LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
+          LOG.debug("Didn't find a RegionServer that replicates, won't prevent 
deletions.");
           return Collections.emptySet();
         }
         Set<String> wals = new HashSet<>();
@@ -408,8 +401,8 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
         if (v0 == v1) {
           return wals;
         }
-        LOG.info(String.format("Replication queue node cversion changed from 
%d to %d, retry = %d",
-          v0, v1, retry));
+        LOG.info("Replication queue node cversion changed from %d to %d, retry 
= %d",
+            v0, v1, retry);
       }
     } catch (KeeperException e) {
       throw new ReplicationException("Failed to get all wals", e);
@@ -429,7 +422,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
     String peerNode = getHFileRefsPeerNode(peerId);
     try {
       if (ZKUtil.checkExists(zookeeper, peerNode) == -1) {
-        LOG.info("Adding peer " + peerId + " to hfile reference queue.");
+        LOG.info("Adding peer {} to hfile reference queue.", peerId);
         ZKUtil.createWithParents(zookeeper, peerNode);
       }
     } catch (KeeperException e) {
@@ -443,11 +436,9 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
     String peerNode = getHFileRefsPeerNode(peerId);
     try {
       if (ZKUtil.checkExists(zookeeper, peerNode) == -1) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Peer " + peerNode + " not found in hfile reference 
queue.");
-        }
+          LOG.debug("Peer {} not found in hfile reference queue.", peerNode);
       } else {
-        LOG.info("Removing peer " + peerNode + " from hfile reference queue.");
+        LOG.info("Removing peer {} from hfile reference queue.", peerNode);
         ZKUtil.deleteNodeRecursively(zookeeper, peerNode);
       }
     } catch (KeeperException e) {
@@ -460,17 +451,12 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   public void addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)
       throws ReplicationException {
     String peerNode = getHFileRefsPeerNode(peerId);
-    boolean debugEnabled = LOG.isDebugEnabled();
-    if (debugEnabled) {
-      LOG.debug("Adding hfile references " + pairs + " in queue " + peerNode);
-    }
+    LOG.debug("Adding hfile references {} in queue {}", pairs, peerNode);
     List<ZKUtilOp> listOfOps = pairs.stream().map(p -> p.getSecond().getName())
         .map(n -> getHFileNode(peerNode, n))
         .map(f -> ZKUtilOp.createAndFailSilent(f, 
HConstants.EMPTY_BYTE_ARRAY)).collect(toList());
-    if (debugEnabled) {
-      LOG.debug("The multi list size for adding hfile references in zk for 
node " + peerNode +
-        " is " + listOfOps.size());
-    }
+      LOG.debug("The multi list size for adding hfile references in zk for 
node {} is {}",
+          peerNode, listOfOps.size());
     try {
       ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
     } catch (KeeperException e) {
@@ -481,17 +467,12 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   @Override
   public void removeHFileRefs(String peerId, List<String> files) throws 
ReplicationException {
     String peerNode = getHFileRefsPeerNode(peerId);
-    boolean debugEnabled = LOG.isDebugEnabled();
-    if (debugEnabled) {
-      LOG.debug("Removing hfile references " + files + " from queue " + 
peerNode);
-    }
+    LOG.debug("Removing hfile references {} from queue {}", files, peerNode);
 
     List<ZKUtilOp> listOfOps = files.stream().map(n -> getHFileNode(peerNode, 
n))
         .map(ZKUtilOp::deleteNodeFailSilent).collect(toList());
-    if (debugEnabled) {
-      LOG.debug("The multi list size for removing hfile references in zk for 
node " + peerNode +
-        " is " + listOfOps.size());
-    }
+    LOG.debug("The multi list size for removing hfile references in zk for 
node {} is {}",
+        peerNode, listOfOps.size());
     try {
       ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
     } catch (KeeperException e) {
@@ -546,7 +527,7 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
         int v0 = getHFileRefsZNodeCversion();
         List<String> peers = getAllPeersFromHFileRefsQueue();
         if (peers.isEmpty()) {
-          LOG.debug("Didn't find any peers with hfile references, won't 
prevent any deletions.");
+          LOG.debug("Didn't find any peers with hfile references, won't 
prevent deletions.");
           return Collections.emptySet();
         }
         Set<String> hfileRefs = new HashSet<>();
@@ -557,9 +538,8 @@ public class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
         if (v0 == v1) {
           return hfileRefs;
         }
-        LOG.debug(String.format(
-          "Replication hfile references node cversion changed from " + "%d to 
%d, retry = %d", v0,
-          v1, retry));
+        LOG.debug("Replication hfile references node cversion changed from %d 
to %d, retry = %d",
+            v0, v1, retry);
       }
     } catch (KeeperException e) {
       throw new ReplicationException("Failed to get all hfile refs", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index dea6bf8..f5496ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -109,9 +109,7 @@ public class HFileArchiver {
    */
   public static boolean archiveRegion(FileSystem fs, Path rootdir, Path 
tableDir, Path regionDir)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("ARCHIVING " + regionDir.toString());
-    }
+    LOG.debug("ARCHIVING {}", rootdir.toString());
 
     // otherwise, we archive the files
     // make sure we can archive
@@ -145,7 +143,7 @@ public class HFileArchiver {
     FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
     // if there no files, we can just delete the directory and return;
     if (storeDirs == null) {
-      LOG.debug("Region directory " + regionDir + " empty.");
+      LOG.debug("Directory {} empty.", regionDir);
       return deleteRegionWithoutArchiving(fs, regionDir);
     }
 
@@ -194,8 +192,8 @@ public class HFileArchiver {
       RegionInfo parent, Path familyDir, byte[] family) throws IOException {
     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
     if (storeFiles == null) {
-      LOG.debug("No store files to dispose for region=" + 
parent.getRegionNameAsString() +
-          ", family=" + Bytes.toString(family));
+      LOG.debug("No files to dispose of in {}, family={}", 
parent.getRegionNameAsString(),
+          Bytes.toString(family));
       return;
     }
 
@@ -230,15 +228,15 @@ public class HFileArchiver {
 
     // sometimes in testing, we don't have rss, so we need to check for that
     if (fs == null) {
-      LOG.warn("Passed filesystem is null, so just deleting the files without 
archiving for region:"
-          + Bytes.toString(regionInfo.getRegionName()) + ", family:" + 
Bytes.toString(family));
+      LOG.warn("Passed filesystem is null, so just deleting files without 
archiving for {}," +
+              "family={}", Bytes.toString(regionInfo.getRegionName()), 
Bytes.toString(family));
       deleteStoreFilesWithoutArchiving(compactedFiles);
       return;
     }
 
     // short circuit if we don't have any files to delete
     if (compactedFiles.isEmpty()) {
-      LOG.debug("No store files to dispose, done!");
+      LOG.debug("No files to dispose of, done!");
       return;
     }
 
@@ -255,7 +253,7 @@ public class HFileArchiver {
     }
 
     // otherwise we attempt to archive the store files
-    LOG.debug("Archiving compacted store files.");
+    LOG.debug("Archiving compacted files.");
 
     // Wrap the storefile into a File
     StoreToFile getStorePath = new StoreToFile(fs);
@@ -319,9 +317,11 @@ public class HFileArchiver {
   private static List<File> resolveAndArchive(FileSystem fs, Path 
baseArchiveDir,
       Collection<File> toArchive, long start) throws IOException {
     // short circuit if no files to move
-    if (toArchive.isEmpty()) return Collections.emptyList();
+    if (toArchive.isEmpty()) {
+      return Collections.emptyList();
+    }
 
-    if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive 
directory: " + baseArchiveDir);
+    LOG.trace("Moving files to the archive directory {}", baseArchiveDir);
 
     // make sure the archive directory exists
     if (!fs.exists(baseArchiveDir)) {
@@ -329,7 +329,7 @@ public class HFileArchiver {
         throw new IOException("Failed to create the archive directory:" + 
baseArchiveDir
             + ", quitting archive attempt.");
       }
-      if (LOG.isTraceEnabled()) LOG.trace("Created archive directory:" + 
baseArchiveDir);
+      LOG.trace("Created archive directory {}", baseArchiveDir);
     }
 
     List<File> failures = new ArrayList<>();
@@ -337,7 +337,7 @@ public class HFileArchiver {
     for (File file : toArchive) {
       // if its a file archive it
       try {
-        if (LOG.isTraceEnabled()) LOG.trace("Archiving: " + file);
+        LOG.trace("Archiving {}", file);
         if (file.isFile()) {
           // attempt to archive the file
           if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) {
@@ -346,7 +346,7 @@ public class HFileArchiver {
           }
         } else {
           // otherwise its a directory and we need to archive all files
-          if (LOG.isTraceEnabled()) LOG.trace(file + " is a directory, 
archiving children files");
+          LOG.trace("{} is a directory, archiving children files", file);
           // so we add the directory name to the one base archive
           Path parentArchiveDir = new Path(baseArchiveDir, file.getName());
           // and then get all the files from that directory and attempt to
@@ -355,7 +355,7 @@ public class HFileArchiver {
           failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, 
start));
         }
       } catch (IOException e) {
-        LOG.warn("Failed to archive " + file, e);
+        LOG.warn("Failed to archive {}", file, e);
         failures.add(file);
       }
     }
@@ -385,10 +385,8 @@ public class HFileArchiver {
     // really, really unlikely situtation, where we get the same name for the 
existing file, but
     // is included just for that 1 in trillion chance.
     if (fs.exists(archiveFile)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("File:" + archiveFile + " already exists in archive, moving 
to "
-            + "timestamped backup and overwriting current.");
-      }
+      LOG.debug("{} already exists in archive, moving to timestamped backup 
and " +
+          "overwriting current.", archiveFile);
 
       // move the archive file to the stamped backup
       Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + 
archiveStartTime);
@@ -405,10 +403,7 @@ public class HFileArchiver {
       LOG.debug("Backed up archive file from " + archiveFile);
     }
 
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("No existing file in archive for: " + archiveFile +
-        ", free to archive original file.");
-    }
+    LOG.trace("No existing file in archive for {}, free to archive original 
file.", archiveFile);
 
     // at this point, we should have a free spot for the archive file
     boolean success = false;
@@ -421,11 +416,11 @@ public class HFileArchiver {
         try {
           if (!fs.exists(archiveDir)) {
             if (fs.mkdirs(archiveDir)) {
-              LOG.debug("Created archive directory:" + archiveDir);
+              LOG.debug("Created archive directory {}", archiveDir);
             }
           }
         } catch (IOException e) {
-          LOG.warn("Failed to create directory: " + archiveDir, e);
+          LOG.warn("Failed to create directory {}", archiveDir, e);
         }
       }
 
@@ -446,9 +441,7 @@ public class HFileArchiver {
       return false;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Finished archiving from " + currentFile + ", to " + 
archiveFile);
-    }
+    LOG.debug("Archived from {} to {}", currentFile, archiveFile);
     return true;
   }
 
@@ -462,10 +455,10 @@ public class HFileArchiver {
   private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path 
regionDir)
       throws IOException {
     if (fs.delete(regionDir, true)) {
-      LOG.debug("Deleted " + regionDir);
+      LOG.debug("Deleted {}", regionDir);
       return true;
     }
-    LOG.debug("Failed to delete region directory:" + regionDir);
+    LOG.debug("Failed to delete directory {}", regionDir);
     return false;
   }
 
@@ -480,13 +473,13 @@ public class HFileArchiver {
    */
   private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> 
compactedFiles)
       throws IOException {
-    LOG.debug("Deleting store files without archiving.");
+    LOG.debug("Deleting files without archiving.");
     List<IOException> errors = new ArrayList<>(0);
     for (HStoreFile hsf : compactedFiles) {
       try {
         hsf.deleteStoreFile();
       } catch (IOException e) {
-        LOG.error("Failed to delete store file:" + hsf.getPath());
+        LOG.error("Failed to delete {}", hsf.getPath());
         errors.add(e);
       }
     }
@@ -605,7 +598,7 @@ public class HFileArchiver {
 
     @Override
     public String toString() {
-      return this.getClass() + ", file:" + getPath().toString();
+      return this.getClass().getSimpleName() + ", " + getPath().toString();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fd7e583..0dc5aa3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1514,8 +1514,8 @@ public class HMaster extends HRegionServer implements 
MasterServices {
         TableDescriptor tblDesc = getTableDescriptors().get(table);
         if (table.isSystemTable() || (tblDesc != null &&
             !tblDesc.isNormalizationEnabled())) {
-          LOG.debug("Skipping normalization for table: " + table + ", as it's 
either system"
-              + " table or doesn't have auto normalization turned on");
+          LOG.trace("Skipping normalization for {}, as it's either system"
+              + " table or doesn't have auto normalization turned on", table);
           continue;
         }
         List<NormalizationPlan> plans = 
this.normalizer.computePlanForTable(table);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index dca9cbb..ac5ad64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -292,7 +292,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
         continue;
       }
       if (!c.isNeeded()) {
-        LOG.debug(c.getClass().getName() + " indicated that its cost should 
not be considered");
+        LOG.debug("{} not needed", c.getClass().getSimpleName());
         continue;
       }
       sumMultiplier += multiplier;

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 120bba3..a3bd938 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -633,7 +633,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
       boolean hasLock = true;
       final LockAndQueue[] regionLocks = new LockAndQueue[regionInfo.length];
       for (int i = 0; i < regionInfo.length; ++i) {
-        LOG.info(procedure + ", " + regionInfo[i].getRegionNameAsString());
+        LOG.info("{} checking lock on {}", procedure, 
regionInfo[i].getEncodedName());
         assert table != null;
         assert regionInfo[i] != null;
         assert regionInfo[i].getTable() != null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index e62303e..5dc8e48 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -98,13 +98,13 @@ public class ChunkCreator {
                                float poolSizePercentage, float 
indexChunkSizePercentage,
                                float initialCountPercentage,
                                HeapMemoryManager heapMemoryManager) {
-    this.dataChunksPool = initializePool(globalMemStoreSize,
+    this.dataChunksPool = initializePool("data", globalMemStoreSize,
             (1 - indexChunkSizePercentage) * poolSizePercentage,
             initialCountPercentage, chunkSize, heapMemoryManager);
     // The index chunks pool is needed only when the index type is CCM.
     // Since the pools are not created at all when the index type isn't CCM,
     // we don't need to check it here.
-    this.indexChunksPool = initializePool(globalMemStoreSize,
+    this.indexChunksPool = initializePool("index", globalMemStoreSize,
             indexChunkSizePercentage * poolSizePercentage,
             initialCountPercentage, (int) (indexChunkSizePercentage * 
chunkSize),
             heapMemoryManager);
@@ -331,8 +331,11 @@ public class ChunkCreator {
     private static final int statThreadPeriod = 60 * 5;
     private final AtomicLong chunkCount = new AtomicLong();
     private final LongAdder reusedChunkCount = new LongAdder();
+    private final String label;
 
-    MemStoreChunkPool(int chunkSize, int maxCount, int initialCount, float 
poolSizePercentage) {
+    MemStoreChunkPool(String label, int chunkSize, int maxCount, int 
initialCount,
+        float poolSizePercentage) {
+      this.label = label;
       this.chunkSize = chunkSize;
       this.maxCount = maxCount;
       this.poolSizePercentage = poolSizePercentage;
@@ -421,12 +424,10 @@ public class ChunkCreator {
         long created = chunkCount.get();
         long reused = reusedChunkCount.sum();
         long total = created + reused;
-        LOG.debug("Stats (chunk size=" + chunkSize + "): "
-                + "current pool size=" + reclaimedChunks.size()
-                + ",created chunk count=" + created
-                + ",reused chunk count=" + reused
-                + ",reuseRatio=" + (total == 0 ? "0" : 
StringUtils.formatPercent(
-                (float) reused / (float) total, 2)));
+        LOG.debug("{} Stats (chunk size={}): current pool size={}, created 
chunk count={}, " +
+                "reused chunk count={}, reuseRatio={}", label, chunkSize, 
reclaimedChunks.size(),
+            created, reused,
+            (total == 0? "0": 
StringUtils.formatPercent((float)reused/(float)total,2)));
       }
     }
 
@@ -438,7 +439,7 @@ public class ChunkCreator {
     public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) 
{
       // don't do any tuning in case of offheap memstore
       if (isOffheap()) {
-        LOG.warn("Not tuning the chunk pool as it is offheap");
+        LOG.warn("{} not tuning the chunk pool as it is offheap", label);
         return;
       }
       int newMaxCount =
@@ -448,12 +449,14 @@ public class ChunkCreator {
         if (newMaxCount > this.maxCount) {
           // Max chunks getting increased. Just change the variable. Later 
calls to getChunk() would
           // create and add them to Q
-          LOG.info("Max count for chunks increased from " + this.maxCount + " 
to " + newMaxCount);
+          LOG.info("{} max count for chunks increased from {} to {}", 
this.label, this.maxCount,
+              newMaxCount);
           this.maxCount = newMaxCount;
         } else {
           // Max chunks getting decreased. We may need to clear off some of 
the pooled chunks now
           // itself. If the extra chunks are serving already, do not pool 
those when we get them back
-          LOG.info("Max count for chunks decreased from " + this.maxCount + " 
to " + newMaxCount);
+          LOG.info("{} max count for chunks decreased from {} to {}", 
this.label, this.maxCount,
+              newMaxCount);
           this.maxCount = newMaxCount;
           if (this.reclaimedChunks.size() > newMaxCount) {
             synchronized (this) {
@@ -472,11 +475,11 @@ public class ChunkCreator {
     chunkPoolDisabled = false;
   }
 
-  private MemStoreChunkPool initializePool(long globalMemStoreSize, float 
poolSizePercentage,
-                                           float initialCountPercentage, int 
chunkSize,
-                                           HeapMemoryManager 
heapMemoryManager) {
+  private MemStoreChunkPool initializePool(String label, long 
globalMemStoreSize,
+      float poolSizePercentage, float initialCountPercentage, int chunkSize,
+      HeapMemoryManager heapMemoryManager) {
     if (poolSizePercentage <= 0) {
-      LOG.info("PoolSizePercentage is less than 0. So not using pool");
+      LOG.info("{} poolSizePercentage is less than 0. So not using pool", 
label);
       return null;
     }
     if (chunkPoolDisabled) {
@@ -488,14 +491,13 @@ public class ChunkCreator {
     }
     int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize);
     if (initialCountPercentage > 1.0 || initialCountPercentage < 0) {
-      throw new IllegalArgumentException(
-              MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + " must be between 0.0 
and 1.0");
+      throw new IllegalArgumentException(label + " " + 
MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY +
+          " must be between 0.0 and 1.0");
     }
     int initialCount = (int) (initialCountPercentage * maxCount);
-    LOG.info("Allocating MemStoreChunkPool with chunk size "
-            + StringUtils.byteDesc(chunkSize) + ", max count " + maxCount
-            + ", initial count " + initialCount);
-    MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(chunkSize, 
maxCount,
+    LOG.info("{} allocating {} MemStoreChunkPool with chunk size {}, max count 
{}, " +
+            "initial count {}", label, StringUtils.byteDesc(chunkSize), 
maxCount, initialCount);
+    MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(label, 
chunkSize, maxCount,
             initialCount, poolSizePercentage);
     if (heapMemoryManager != null && memStoreChunkPool != null) {
       // Register with Heap Memory manager

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index 04a553e..98df9b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -158,10 +158,10 @@ public class CompactSplit implements CompactionRequester, 
PropagatingConfigurati
 
   @Override
   public String toString() {
-    return "compaction_queue=("
-        + longCompactions.getQueue().size() + ":"
+    return "compactionQueue=(longCompactions="
+        + longCompactions.getQueue().size() + ":shortCompactions="
         + shortCompactions.getQueue().size() + ")"
-        + ", split_queue=" + splits.getQueue().size();
+        + ", splitQueue=" + splits.getQueue().size();
   }
 
   public String dumpQueue() {
@@ -528,10 +528,10 @@ public class CompactSplit implements CompactionRequester, 
PropagatingConfigurati
     @Override
     public String toString() {
       if (compaction != null) {
-        return "Request = " + compaction.getRequest();
+        return "Request=" + compaction.getRequest();
       } else {
-        return "regionName = " + region.toString() + ", storeName = " + 
store.toString() +
-            ", priority = " + queuedPriority + ", time = " + time;
+        return "region=" + region.toString() + ", storeName=" + 
store.toString() +
+            ", priority=" + queuedPriority + ", startTime=" + time;
       }
     }
 
@@ -591,7 +591,7 @@ public class CompactSplit implements CompactionRequester, 
PropagatingConfigurati
         boolean completed =
             region.compact(c, store, compactionThroughputController, user);
         long now = EnvironmentEdgeManager.currentTime();
-        LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " +
+        LOG.info(((completed) ? "Completed" : "Aborted") + " compaction " +
               this + "; duration=" + StringUtils.formatTimeDiff(now, start));
         if (completed) {
           // degenerate case: blocked regions require recursive enqueues
@@ -619,7 +619,7 @@ public class CompactSplit implements CompactionRequester, 
PropagatingConfigurati
         tracker.afterExecution(store);
         completeTracker.completed(store);
         region.decrementCompactionsQueuedCount();
-        LOG.debug("CompactSplitThread Status: " + CompactSplit.this);
+        LOG.debug("Status {}", CompactSplit.this);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index d60b049..efdc8ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -427,9 +427,8 @@ public class CompactingMemStore extends AbstractMemStore {
         // compaction is in progress
         compactor.start();
       } catch (IOException e) {
-        LOG.warn("Unable to run memstore compaction. region "
-            + getRegionServices().getRegionInfo().getRegionNameAsString() + 
"store: "
-            + getFamilyName(), e);
+        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",
+            getRegionServices().getRegionInfo().getEncodedName(), 
getFamilyName(), e);
       }
     } finally {
       inMemoryFlushInProgress.set(false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0a94846..c385c3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2078,8 +2078,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
             return false;
           }
         }
-        LOG.info("Starting compaction on " + store + " in region " + this
-            + (compaction.getRequest().isOffPeak()?" as an off-peak 
compaction":""));
+        LOG.info("Starting compaction of {} in {}{}", store, this,
+            (compaction.getRequest().isOffPeak()?" as an off-peak 
compaction":""));
         doRegionCompactionPrep();
         try {
           status.setStatus("Compacting store " + store);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
index fbb5f75..c80b912 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
@@ -85,12 +85,12 @@ public abstract class MemStoreCompactionStrategy {
     int numOfSegments = versionedList.getNumOfSegments();
     if (numOfSegments > pipelineThreshold) {
       // to avoid too many segments, merge now
-      LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments);
+      LOG.trace("Strategy={}, store={}; merging {} segments", strategy, 
cfName, numOfSegments);
       return getMergingAction();
     }
 
     // just flatten a segment
-    LOG.debug("{} {}; flattening a segment", strategy, cfName);
+    LOG.trace("Strategy={}, store={}; flattening a segment", strategy, cfName);
     return getFlattenAction();
   }
 
@@ -104,8 +104,8 @@ public abstract class MemStoreCompactionStrategy {
 
   protected Action compact(VersionedSegmentsList versionedList, String 
strategyInfo) {
     int numOfSegments = versionedList.getNumOfSegments();
-    LOG.debug(strategyInfo+" memory compaction for store " + cfName
-        + " compacting " + numOfSegments + " segments");
+    LOG.trace("{} in-memory compaction for store={} compacting {} segments", 
strategyInfo,
+        cfName, numOfSegments);
     return Action.COMPACT;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index d652bfe..2629889 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -92,7 +92,7 @@ public class MemStoreCompactor {
     // get a snapshot of the list of the segments from the pipeline,
     // this local copy of the list is marked with specific version
     versionedList = compactingMemStore.getImmutableSegments();
-    LOG.debug("Starting on {}/{}",
+    LOG.trace("Speculative compaction starting on {}/{}",
         
compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
         compactingMemStore.getStore().getColumnFamilyName());
     HStore store = compactingMemStore.getStore();
@@ -177,8 +177,8 @@ public class MemStoreCompactor {
         }
       }
     } catch (IOException e) {
-      LOG.debug("Interrupting the MemStore in-memory compaction for store "
-          + compactingMemStore.getFamilyName());
+      LOG.trace("Interrupting in-memory compaction for store={}",
+          compactingMemStore.getFamilyName());
       Thread.currentThread().interrupt();
     } finally {
       // For the MERGE case, if the result was created, but swap didn't happen,

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 9f6a015..2bc1e01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -980,10 +980,8 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
         heap.peek() == null || bytesRead < preadMaxBytes) {
       return;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Switch to stream read because we have already read " + 
bytesRead +
-          " bytes from this scanner");
-    }
+    LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead,
+        this.store.getColumnFamilyName());
     scanUsePread = false;
     Cell lastTop = heap.peek();
     List<KeyValueScanner> memstoreScanners = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/bedf849d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 056f076..7dfb7fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -199,15 +199,14 @@ public abstract class Compactor<T extends CellSink> {
       }
       tmp = fileInfo.get(TIMERANGE_KEY);
       fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: 
TimeRangeTracker.parseFrom(tmp).getMax();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Compacting " + file +
-          ", keycount=" + keyCount +
-          ", bloomtype=" + r.getBloomFilterType().toString() +
-          ", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1) +
-          ", encoding=" + r.getHFileReader().getDataBlockEncoding() +
-          ", seqNum=" + seqNum +
-          (allFiles ? ", earliestPutTs=" + earliestPutTs: ""));
-      }
+      LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, 
encoding={}, seqNum={}{}",
+          (file.getPath() == null? null: file.getPath().getName()),
+          keyCount,
+          r.getBloomFilterType().toString(),
+          TraditionalBinaryPrefix.long2String(r.length(), "", 1),
+          r.getHFileReader().getDataBlockEncoding(),
+          seqNum,
+          (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
     }
     return fd;
   }
