Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 4eb98f70e -> 401a0ee38
  refs/heads/trunk 042ef2fa7 -> f20dc0d57
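
Every hunk below applies the same pattern: commons-logging calls built by string
concatenation (often wrapped in LOG.isDebugEnabled()/isTraceEnabled() guards) are
replaced with SLF4J parameterized logging, and Log/LogFactory fields become
Logger/LoggerFactory. As a rough illustration of the idiom only (the class below is
hypothetical and not part of this patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingIdiomExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingIdiomExample.class);

      void report(int activeConnections, Exception cause) {
        // Old style: the concatenation runs even when DEBUG is off,
        // so call sites guarded it explicitly.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Number of active connections is: " + activeConnections);
        }

        // New style: {} placeholders are only formatted when DEBUG is enabled,
        // so the guard can usually be dropped.
        LOG.debug("Number of active connections is: {}", activeConnections);

        // A trailing Throwable with no matching placeholder is treated as the
        // exception and logged with its stack trace (SLF4J 1.6+ behaviour).
        LOG.warn("Failed after {} connections", activeConnections, cause);
      }
    }

Guards are kept where assembling the log arguments is itself costly, for example the
multi-line opWriteBlock debug block in DataXceiver below.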


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index b78fc9c..d0ded89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -148,10 +148,8 @@ class DataXceiver extends Receiver implements Runnable {
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of active connections is: "
-          + datanode.getXceiverCount());
-    }
+    LOG.debug("Number of active connections is: {}",
+        datanode.getXceiverCount());
   }
 
   /**
@@ -187,7 +185,7 @@ class DataXceiver extends Receiver implements Runnable {
     // This doesn't need to be in a critical section. Althogh the client
     // can resue the connection to issue a different request, trying sending
     // an OOB through the recently closed block receiver is harmless.
-    LOG.info("Sending OOB to peer: " + peer);
+    LOG.info("Sending OOB to peer: {}", peer);
     br.sendOOB();
   }
 
@@ -199,7 +197,7 @@ class DataXceiver extends Receiver implements Runnable {
       }
       xceiver.interrupt();
     }
-    LOG.info("Stopped the writer: " + peer);
+    LOG.info("Stopped the writer: {}", peer);
   }
 
   /**
@@ -239,14 +237,15 @@ class DataXceiver extends Receiver implements Runnable {
       } catch (InvalidMagicNumberException imne) {
         if (imne.isHandshake4Encryption()) {
          LOG.info("Failed to read expected encryption handshake from client " +
-              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
+              "at {}. Perhaps the client " +
               "is running an older version of Hadoop which does not support " +
-              "encryption", imne);
+              "encryption", peer.getRemoteAddressString(), imne);
         } else {
           LOG.info("Failed to read expected SASL data transfer protection " +
-              "handshake from client at " + peer.getRemoteAddressString() + 
+              "handshake from client at {}" +
               ". Perhaps the client is running an older version of Hadoop " +
-              "which does not support SASL data transfer protection", imne);
+              "which does not support SASL data transfer protection",
+              peer.getRemoteAddressString(), imne);
         }
         return;
       }
@@ -302,7 +301,7 @@ class DataXceiver extends Receiver implements Runnable {
         if (LOG.isTraceEnabled()) {
           LOG.trace(s, t);
         } else {
-          LOG.info(s + "; " + t);
+          LOG.info("{}; {}", s, t.toString());
         }
       } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
         String s1 =
@@ -311,23 +310,19 @@ class DataXceiver extends Receiver implements Runnable {
         if (LOG.isTraceEnabled()) {
           LOG.trace(s1, t);
         } else {
-          LOG.info(s1 + "; " + t);          
+          LOG.info("{}; {}", s1, t.toString());
         }
       } else if (t instanceof InvalidToken) {
         // The InvalidToken exception has already been logged in
         // checkAccess() method and this is not a server error.
-        if (LOG.isTraceEnabled()) {
-          LOG.trace(s, t);
-        }
+        LOG.trace(s, t);
       } else {
         LOG.error(s, t);
       }
     } finally {
       collectThreadLocalStates();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
-            + datanode.getXceiverCount());
-      }
+      LOG.debug("{}:Number of active connections is: {}",
+          datanode.getDisplayName(), datanode.getXceiverCount());
       updateCurrentThreadName("Cleaning up");
       if (peer != null) {
         dataXceiverServer.closePeer(peer);
@@ -405,21 +400,22 @@ class DataXceiver extends Receiver implements Runnable {
         DomainSocket sock = peer.getDomainSocket();
         sock.sendFileDescriptors(fds, buf, 0, buf.length);
         if (supportsReceiptVerification) {
-          LOG.trace("Reading receipt verification byte for " + slotId);
+          LOG.trace("Reading receipt verification byte for {}", slotId);
           int val = sock.getInputStream().read();
           if (val < 0) {
             throw new EOFException();
           }
         } else {
-          LOG.trace("Receipt verification is not enabled on the DataNode.  " +
-                    "Not verifying " + slotId);
+          LOG.trace("Receipt verification is not enabled on the DataNode. " +
+                    "Not verifying {}", slotId);
         }
         success = true;
       }
     } finally {
       if ((!success) && (registeredSlotId != null)) {
-        LOG.info("Unregistering " + registeredSlotId + " because the " +
-            "requestShortCircuitFdsForRead operation failed.");
+        LOG.info("Unregistering {} because the " +
+            "requestShortCircuitFdsForRead operation failed.",
+            registeredSlotId);
         datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
       }
       if (ClientTraceLog.isInfoEnabled()) {
@@ -547,8 +543,8 @@ class DataXceiver extends Receiver implements Runnable {
         // We don't want to close the socket here, since that might lead to
         // bad behavior inside the poll() call.  See HADOOP-11802 for details.
         try {
-          LOG.warn("Failed to send success response back to the client.  " +
-              "Shutting down socket for " + shmInfo.getShmId() + ".");
+          LOG.warn("Failed to send success response back to the client. " +
+              "Shutting down socket for {}", shmInfo.getShmId());
           sock.shutdown();
         } catch (IOException e) {
           LOG.warn("Failed to shut down socket in error handler", e);
@@ -616,9 +612,9 @@ class DataXceiver extends Receiver implements Runnable {
           ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(
               PBHelperClient.vintPrefixed(in));
           if (!stat.hasStatus()) {
-            LOG.warn("Client " + peer.getRemoteAddressString() +
-                " did not send a valid status code after reading. " +
-                "Will close connection.");
+            LOG.warn("Client {} did not send a valid status code " +
+                "after reading. Will close connection.",
+                peer.getRemoteAddressString());
             IOUtils.closeStream(out);
           }
         } catch (IOException ioe) {
@@ -633,10 +629,8 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBlocksRead();
       datanode.metrics.incrTotalReadTime(duration);
     } catch ( SocketException ignored ) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " +
-            remoteAddress, ignored);
-      }
+      LOG.trace("{}:Ignoring exception while serving {} to {}",
+          dnR, block, remoteAddress, ignored);
       // Its ok for remote side to close the connection anytime.
       datanode.metrics.incrBlocksRead();
       IOUtils.closeStream(out);
@@ -645,8 +639,8 @@ class DataXceiver extends Receiver implements Runnable {
        * Earlier version shutdown() datanode if there is disk error.
        */
       if (!(ioe instanceof SocketTimeoutException)) {
-        LOG.warn(dnR + ":Got exception while serving " + block + " to "
-          + remoteAddress, ioe);
+        LOG.warn("{}:Got exception while serving {} to {}",
+            dnR, block, remoteAddress, ioe);
         incrDatanodeNetworkErrors();
       }
       throw ioe;
@@ -719,19 +713,18 @@ class DataXceiver extends Receiver implements Runnable {
       throw new IOException(stage + " does not support multiple targets "
           + Arrays.asList(targets));
     }
-    
+
     if (LOG.isDebugEnabled()) {
-      LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname 
-               + "\n  block  =" + block + ", newGs=" + latestGenerationStamp
-               + ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]"
-          + "\n  targets=" + Arrays.asList(targets)
-          + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode
-          + ", pinning=" + pinning);
-      LOG.debug("isDatanode=" + isDatanode
-          + ", isClient=" + isClient
-          + ", isTransfer=" + isTransfer);
-      LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() +
-                " tcp no delay " + peer.getTcpNoDelay());
+      LOG.debug("opWriteBlock: stage={}, clientname={}\n  " +
+              "block  ={}, newGs={}, bytesRcvd=[{}, {}]\n  " +
+              "targets={}; pipelineSize={}, srcDataNode={}, pinning={}",
+          stage, clientname, block, latestGenerationStamp, minBytesRcvd,
+          maxBytesRcvd, Arrays.asList(targets), pipelineSize, srcDataNode,
+          pinning);
+      LOG.debug("isDatanode={}, isClient={}, isTransfer={}",
+          isDatanode, isClient, isTransfer);
+      LOG.debug("writeBlock receive buf size {} tcp no delay {}",
+          peer.getReceiveBufferSize(), peer.getTcpNoDelay());
     }
 
     // We later mutate block's generation stamp and length, but we need to
@@ -741,8 +734,8 @@ class DataXceiver extends Receiver implements Runnable {
     if (block.getNumBytes() == 0) {
       block.setNumBytes(dataXceiverServer.estimateBlockSize);
     }
-    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
-        + localAddress);
+    LOG.info("Receiving {} src: {} dest: {}",
+        block, remoteAddress, localAddress);
 
     DataOutputStream mirrorOut = null;  // stream to next target
     DataInputStream mirrorIn = null;    // reply from next target
@@ -778,9 +771,7 @@ class DataXceiver extends Receiver implements Runnable {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
         mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + mirrorNode);
-        }
+        LOG.debug("Connecting to datanode {}", mirrorNode);
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -844,11 +835,10 @@ class DataXceiver extends Receiver implements Runnable {
                 BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
             mirrorInStatus = connectAck.getStatus();
             firstBadLink = connectAck.getFirstBadLink();
-            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-              LOG.debug("Datanode " + targets.length +
-                       " got response for connect ack " +
-                       " from downstream datanode with firstbadlink as " +
-                       firstBadLink);
+            if (mirrorInStatus != SUCCESS) {
+              LOG.debug("Datanode {} got response for connect " +
+                  "ack from downstream datanode with firstbadlink as {}",
+                  targets.length, firstBadLink);
             }
           }
 
@@ -869,13 +859,12 @@ class DataXceiver extends Receiver implements Runnable {
           IOUtils.closeSocket(mirrorSock);
           mirrorSock = null;
           if (isClient) {
-            LOG.error(datanode + ":Exception transfering block " +
-                      block + " to mirror " + mirrorNode + ": " + e);
+            LOG.error("{}:Exception transfering block {} to mirror {}",
+                datanode, block, mirrorNode, e);
             throw e;
           } else {
-            LOG.info(datanode + ":Exception transfering " +
-                     block + " to mirror " + mirrorNode +
-                     "- continuing without the mirror", e);
+            LOG.info("{}:Exception transfering {} to mirror {}- continuing " +
+                "without the mirror", datanode, block, mirrorNode, e);
             incrDatanodeNetworkErrors();
           }
         }
@@ -883,10 +872,9 @@ class DataXceiver extends Receiver implements Runnable {
 
       // send connect-ack to source for clients and not transfer-RBW/Finalized
       if (isClient && !isTransfer) {
-        if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-          LOG.debug("Datanode " + targets.length +
-                   " forwarding connect ack to upstream firstbadlink is " +
-                   firstBadLink);
+        if (mirrorInStatus != SUCCESS) {
+          LOG.debug("Datanode {} forwarding connect ack to upstream " +
+              "firstbadlink is {}", targets.length, firstBadLink);
         }
         BlockOpResponseProto.newBuilder()
           .setStatus(mirrorInStatus)
@@ -904,9 +892,7 @@ class DataXceiver extends Receiver implements Runnable {
 
         // send close-ack for transfer-RBW/Finalized 
         if (isTransfer) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("TRANSFER: send close-ack");
-          }
+          LOG.trace("TRANSFER: send close-ack");
           writeResponse(SUCCESS, null, replyOut);
         }
       }
@@ -924,15 +910,16 @@ class DataXceiver extends Receiver implements Runnable {
       if (isDatanode ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
-        LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
-            + localAddress + " of size " + block.getNumBytes());
+        LOG.info("Received {} src: {} dest: {} of size {}",
+            block, remoteAddress, localAddress, block.getNumBytes());
       }
 
       if(isClient) {
         size = block.getNumBytes();
       }
     } catch (IOException ioe) {
-      LOG.info("opWriteBlock " + block + " received exception " + ioe);
+      LOG.info("opWriteBlock {} received exception {}",
+          block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -970,7 +957,8 @@ class DataXceiver extends Receiver implements Runnable {
           targetStorageTypes, targetStorageIds, clientName);
       writeResponse(Status.SUCCESS, null, out);
     } catch (IOException ioe) {
-      LOG.info("transferBlock " + blk + " received exception " + ioe);
+      LOG.info("transferBlock {} received exception {}",
+          blk, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1005,7 +993,8 @@ class DataXceiver extends Receiver implements Runnable {
           .writeDelimitedTo(out);
       out.flush();
     } catch (IOException ioe) {
-      LOG.info("blockChecksum " + block + " received exception " + ioe);
+      LOG.info("blockChecksum {} received exception {}",
+          block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1046,8 +1035,8 @@ class DataXceiver extends Receiver implements Runnable {
           .writeDelimitedTo(out);
       out.flush();
     } catch (IOException ioe) {
-      LOG.info("blockChecksum " + stripedBlockInfo.getBlock() +
-          " received exception " + ioe);
+      LOG.info("blockChecksum {} received exception {}",
+          stripedBlockInfo.getBlock(), ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1105,10 +1094,10 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBlocksRead();
       datanode.metrics.incrTotalReadTime(duration);
       
-      LOG.info("Copied " + block + " to " + peer.getRemoteAddressString());
+      LOG.info("Copied {} to {}", block, peer.getRemoteAddressString());
     } catch (IOException ioe) {
       isOpSuccess = false;
-      LOG.info("opCopyBlock " + block + " received exception " + ioe);
+      LOG.info("opCopyBlock {} received exception {}", block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1163,16 +1152,14 @@ class DataXceiver extends Receiver implements Runnable {
         ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
             storageType, storageId);
         if (oldReplica != null) {
-          LOG.info("Moved " + block + " from StorageType "
-              + oldReplica.getVolume().getStorageType() + " to " + storageType);
+          LOG.info("Moved {} from StorageType {} to {}",
+              block, oldReplica.getVolume().getStorageType(), storageType);
         }
       } else {
         block.setNumBytes(dataXceiverServer.estimateBlockSize);
         // get the output stream to the proxy
         final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + dnAddr);
-        }
+        LOG.debug("Connecting to datanode {}", dnAddr);
         InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
         proxySock = datanode.newSocket();
         NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
@@ -1229,8 +1216,8 @@ class DataXceiver extends Receiver implements Runnable {
         datanode.notifyNamenodeReceivedBlock(
             block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
         
-        LOG.info("Moved " + block + " from " + peer.getRemoteAddressString()
-            + ", delHint=" + delHint);
+        LOG.info("Moved {} from {}, delHint={}",
+            block, peer.getRemoteAddressString(), delHint);
       }
     } catch (IOException ioe) {
       opStatus = ERROR;
@@ -1260,7 +1247,8 @@ class DataXceiver extends Receiver implements Runnable {
       try {
         sendResponse(opStatus, errMsg);
       } catch (IOException ioe) {
-        LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
+        LOG.warn("Error writing reply back to {}",
+            peer.getRemoteAddressString());
         incrDatanodeNetworkErrors();
       }
       IOUtils.closeStream(proxyOut);
@@ -1408,10 +1396,8 @@ class DataXceiver extends Receiver implements Runnable {
       final String[] storageIds) throws IOException {
     checkAndWaitForBP(blk);
     if (datanode.isBlockTokenEnabled) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Checking block access token for block '" + blk.getBlockId()
-            + "' with mode '" + mode + "'");
-      }
+      LOG.debug("Checking block access token for block '{}' with mode '{}'",
+          blk.getBlockId(), mode);
       try {
         datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode,
             storageTypes, storageIds);
@@ -1429,9 +1415,9 @@ class DataXceiver extends Receiver implements Runnable {
             resp.build().writeDelimitedTo(out);
             out.flush();
           }
-          LOG.warn("Block token verification failed: op=" + op
-              + ", remoteAddress=" + remoteAddress
-              + ", message=" + e.getLocalizedMessage());
+          LOG.warn("Block token verification failed: op={}, " +
+                  "remoteAddress={}, message={}",
+              op, remoteAddress, e.getLocalizedMessage());
           throw e;
         } finally {
           IOUtils.closeStream(out);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0c9b875..6c27d7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6185,7 +6185,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         LOG.debug("Get corrupt file blocks returned error: " + e.getMessage());
       }
     } catch (IOException e) {
-      LOG.warn("Get corrupt file blocks returned error: " + e.getMessage());
+      LOG.warn("Get corrupt file blocks returned error", e);
     }
     return JSON.toString(list);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 9fc954c..6f8ce91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -51,6 +49,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Thread which runs inside the NN when it's in Standby state,
@@ -60,7 +60,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  */
 @InterfaceAudience.Private
 public class StandbyCheckpointer {
-  private static final Log LOG = LogFactory.getLog(StandbyCheckpointer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StandbyCheckpointer.class);
   private static final long PREVENT_AFTER_CANCEL_MS = 2*60*1000L;
   private final CheckpointConf checkpointConf;
   private final Configuration conf;
@@ -136,8 +137,8 @@ public class StandbyCheckpointer {
 
   public void start() {
     LOG.info("Starting standby checkpoint thread...\n" +
-        "Checkpointing active NN to possible NNs: " + activeNNAddresses + "\n" +
-        "Serving checkpoints at " + myNNAddress);
+        "Checkpointing active NN to possible NNs: {}\n" +
+        "Serving checkpoints at {}", activeNNAddresses, myNNAddress);
     thread.start();
   }
   
@@ -177,8 +178,8 @@ public class StandbyCheckpointer {
       assert thisCheckpointTxId >= prevCheckpointTxId;
       if (thisCheckpointTxId == prevCheckpointTxId) {
         LOG.info("A checkpoint was triggered but the Standby Node has not " +
-            "received any transactions since the last checkpoint at txid " +
-            thisCheckpointTxId + ". Skipping...");
+            "received any transactions since the last checkpoint at txid {}. " +
+            "Skipping...", thisCheckpointTxId);
         return;
       }
 
@@ -253,8 +254,7 @@ public class StandbyCheckpointer {
         }
 
       } catch (ExecutionException e) {
-        ioe = new IOException("Exception during image upload: " + e.getMessage(),
-            e.getCause());
+        ioe = new IOException("Exception during image upload", e);
         break;
       } catch (InterruptedException e) {
         ie = e;
@@ -401,15 +401,15 @@ public class StandbyCheckpointer {
           if (needCheckpoint) {
             LOG.info("Triggering a rollback fsimage for rolling upgrade.");
           } else if (uncheckpointed >= checkpointConf.getTxnCount()) {
-            LOG.info("Triggering checkpoint because there have been " + 
-                uncheckpointed + " txns since the last checkpoint, which " +
-                "exceeds the configured threshold " +
-                checkpointConf.getTxnCount());
+            LOG.info("Triggering checkpoint because there have been {} txns " +
+                "since the last checkpoint, " +
+                "which exceeds the configured threshold {}",
+                uncheckpointed, checkpointConf.getTxnCount());
             needCheckpoint = true;
           } else if (secsSinceLast >= checkpointConf.getPeriod()) {
-            LOG.info("Triggering checkpoint because it has been " +
-                secsSinceLast + " seconds since the last checkpoint, which " +
-                "exceeds the configured interval " + checkpointConf.getPeriod());
+            LOG.info("Triggering checkpoint because it has been {} seconds " +
+                "since the last checkpoint, which exceeds the configured " +
+                "interval {}", secsSinceLast, checkpointConf.getPeriod());
             needCheckpoint = true;
           }
 
@@ -442,7 +442,7 @@ public class StandbyCheckpointer {
             LOG.info("Checkpoint finished successfully.");
           }
         } catch (SaveNamespaceCancelledException ce) {
-          LOG.info("Checkpoint was cancelled: " + ce.getMessage());
+          LOG.info("Checkpoint was cancelled: {}", ce.getMessage());
           canceledCount++;
         } catch (InterruptedException ie) {
           LOG.info("Interrupted during checkpointing", ie);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 107decf..45c02f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1332,7 +1332,7 @@ public class MiniDFSCluster implements AutoCloseable {
     try {
       uri = new URI("hdfs://" + hostPort);
     } catch (URISyntaxException e) {
-      NameNode.LOG.warn("unexpected URISyntaxException: " + e );
+      NameNode.LOG.warn("unexpected URISyntaxException", e);
     }
     return uri;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
index 1373891..1d06616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
@@ -32,13 +32,13 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class drives the creation of a mini-cluster on the local machine. By
@@ -58,8 +58,8 @@ import org.eclipse.jetty.util.ajax.JSON;
  * $HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/share/hadoop/hdfs/hadoop-hdfs-0.24.0-SNAPSHOT-tests.jar org.apache.hadoop.test.MiniDFSClusterManager -options...
  */
 public class MiniDFSClusterManager {
-  private static final Log LOG =
-    LogFactory.getLog(MiniDFSClusterManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniDFSClusterManager.class);
 
   private MiniDFSCluster dfs;
   private String writeDetails;
@@ -146,8 +146,8 @@ public class MiniDFSClusterManager {
                                           .build();
     dfs.waitActive();
     
-    LOG.info("Started MiniDFSCluster -- namenode on port "
-        + dfs.getNameNodePort());
+    LOG.info("Started MiniDFSCluster -- namenode on port {}",
+        dfs.getNameNodePort());
 
     if (writeConfig != null) {
       FileOutputStream fos = new FileOutputStream(new File(writeConfig));
@@ -180,7 +180,7 @@ public class MiniDFSClusterManager {
       CommandLineParser parser = new GnuParser();
       cli = parser.parse(options, args);
     } catch(ParseException e) {
-      LOG.warn("options parsing failed:  "+e.getMessage());
+      LOG.warn("options parsing failed", e);
       new HelpFormatter().printHelp("...", options);
       return false;
     }
@@ -192,7 +192,7 @@ public class MiniDFSClusterManager {
     
     if (cli.getArgs().length > 0) {
       for (String arg : cli.getArgs()) {
-        LOG.error("Unrecognized option: " + arg);
+        LOG.error("Unrecognized option: {}", arg);
         new HelpFormatter().printHelp("...", options);
         return false;
       }
@@ -236,12 +236,12 @@ public class MiniDFSClusterManager {
           conf2.set(keyval[0], keyval[1]);
           num_confs_updated++;
         } else {
-          LOG.warn("Ignoring -D option " + prop);
+          LOG.warn("Ignoring -D option {}", prop);
         }
       }
     }
-    LOG.info("Updated " + num_confs_updated +
-        " configuration settings from command line.");
+    LOG.info("Updated {} configuration settings from command line.",
+        num_confs_updated);
   }
 
   /**
@@ -254,8 +254,8 @@ public class MiniDFSClusterManager {
         return Integer.parseInt(o);
       } 
     } catch (NumberFormatException ex) {
-      LOG.error("Couldn't parse value (" + o + ") for option " 
-          + argName + ". Using default: " + defaultValue);
+      LOG.error("Couldn't parse value ({}) for option {}. " +
+          "Using default: {}", o, argName, defaultValue);
     }
     
     return defaultValue;    

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
index 3507b7f..e476223 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
@@ -45,8 +45,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.tools.rumen.JobStoryProducer;
 import org.apache.hadoop.tools.rumen.ZombieJobProducer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Driver class for the Gridmix3 benchmark. Gridmix accepts a timestamped
@@ -58,7 +58,7 @@ import org.apache.commons.logging.LogFactory;
  */
 public class Gridmix extends Configured implements Tool {
 
-  public static final Log LOG = LogFactory.getLog(Gridmix.class);
+  public static final Logger LOG = LoggerFactory.getLogger(Gridmix.class);
 
   /**
    * Output (scratch) directory for submitted jobs. Relative paths are
@@ -184,8 +184,8 @@ public class Gridmix extends Configured implements Tool {
       final Configuration conf = getConf();
 
       if (inputDir.getFileSystem(conf).exists(inputDir)) {
-        LOG.error("Gridmix input data directory " + inputDir
-                  + " already exists when -generate option is used.\n");
+        LOG.error("Gridmix input data directory {} already exists " +
+            "when -generate option is used.", inputDir);
         return STARTUP_FAILED_ERROR;
       }
 
@@ -193,13 +193,13 @@ public class Gridmix extends Configured implements Tool {
       CompressionEmulationUtil.setupDataGeneratorConfig(conf);
     
       final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
-      LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) +
-               " of test data...");
+      LOG.info("Generating {} of test data...",
+          StringUtils.TraditionalBinaryPrefix.long2String(genbytes, "", 1));
       launchGridmixJob(genData);
     
       FsShell shell = new FsShell(conf);
       try {
-        LOG.info("Changing the permissions for inputPath " + inputDir.toString());
+        LOG.info("Changing the permissions for inputPath {}", inputDir);
         shell.run(new String[] {"-chmod","-R","777", inputDir.toString()});
       } catch (Exception e) {
         LOG.error("Couldnt change the file permissions " , e);
@@ -528,9 +528,7 @@ public class Gridmix extends Configured implements Tool {
         statistics.start();
       } catch (Throwable e) {
         LOG.error("Startup failed. " + e.toString() + "\n");
-        if (LOG.isDebugEnabled()) {
-          e.printStackTrace();
-        }
+        LOG.debug("Startup failed", e);
         if (factory != null) factory.abort(); // abort pipeline
         exitCode = STARTUP_FAILED_ERROR;
       } finally {
@@ -561,7 +559,7 @@ public class Gridmix extends Configured implements Tool {
         summarizer.finalize(factory, traceIn, genbytes, userResolver, stats, 
                             conf);
       }
-      IOUtils.cleanup(LOG, trace);
+      IOUtils.cleanupWithLogger(LOG, trace);
     }
     return exitCode;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
index aa52f83..bd025ac 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.fs.swift.http;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
 import org.apache.hadoop.fs.swift.util.SwiftUtils;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.methods.HttpRequestBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayInputStream;
 import java.io.EOFException;
@@ -44,8 +44,8 @@ import java.net.URI;
  */
 public class HttpInputStreamWithRelease extends InputStream {
 
-  private static final Log LOG =
-    LogFactory.getLog(HttpInputStreamWithRelease.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HttpInputStreamWithRelease.class);
   private final URI uri;
   private HttpRequestBase req;
   private HttpResponse resp;
@@ -100,9 +100,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     if (!released) {
       reasonClosed = reason;
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Releasing connection to " + uri + ":  " + reason, ex);
-        }
+        LOG.debug("Releasing connection to {}:  {}", uri, reason, ex);
         if (req != null) {
           if (!dataConsumed) {
             req.abort();
@@ -137,7 +135,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       release(operation, ex);
     } catch (IOException ioe) {
-      LOG.debug("Exception during release: " + operation + " - " + ioe, ioe);
+      LOG.debug("Exception during release: {}", operation, ioe);
       //make this the exception if there was none before
       if (ex == null) {
         ex = ioe;
@@ -173,9 +171,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       read = inStream.read();
     } catch (EOFException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("EOF exception " + e, e);
-      }
+      LOG.debug("EOF exception", e);
       read = -1;
     } catch (IOException e) {
       throw releaseAndRethrow("read()", e);
@@ -200,9 +196,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       read = inStream.read(b, off, len);
     } catch (EOFException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("EOF exception " + e, e);
-      }
+      LOG.debug("EOF exception", e);
       read = -1;
     } catch (IOException e) {
       throw releaseAndRethrow("read(b, off, " + len + ")", e);
@@ -222,13 +216,12 @@ public class HttpInputStreamWithRelease extends InputStream {
   protected void finalize() {
     try {
       if (release("finalize()", constructionStack)) {
-        LOG.warn("input stream of " + uri
-                 + " not closed properly -cleaned up in finalize()");
+        LOG.warn("input stream of {}" +
+                 " not closed properly -cleaned up in finalize()", uri);
       }
     } catch (Exception e) {
       //swallow anything that failed here
-      LOG.warn("Exception while releasing " + uri + "in finalizer",
-               e);
+      LOG.warn("Exception while releasing {} in finalizer", uri, e);
     }
   }
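
The Gridmix hunk above also replaces IOUtils.cleanup(LOG, trace) with
IOUtils.cleanupWithLogger(LOG, trace); the latter takes an org.slf4j.Logger rather
than a commons-logging Log, so it fits the new LOG field. A minimal sketch of that
shape (CleanupExample and closeQuietly are illustrative names, not from the patch):

    import java.io.Closeable;

    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CleanupExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(CleanupExample.class);

      static void closeQuietly(Closeable resource) {
        // Closes the resource, swallowing any IOException and logging it
        // through the supplied SLF4J logger instead of propagating it.
        IOUtils.cleanupWithLogger(LOG, resource);
      }
    }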
 

