Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 c5708a1a0 -> 53255c333


HDFS-9817. Use SLF4J in new classes. Contributed by Anu Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53255c33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53255c33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53255c33

Branch: refs/heads/HDFS-1312
Commit: 53255c33316550ab1dc4a9312e2db17451517e81
Parents: c5708a1
Author: Anu Engineer <aengin...@apache.org>
Authored: Fri Mar 4 20:16:13 2016 -0800
Committer: Anu Engineer <aengin...@apache.org>
Committed: Fri Mar 4 20:16:13 2016 -0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/DiskBalancer.java      |  7 +++---
 .../connectors/ConnectorFactory.java            | 15 +++++++------
 .../connectors/DBNameNodeConnector.java         |  9 ++++----
 .../connectors/JsonNodeConnector.java           |  7 +++---
 .../datamodel/DiskBalancerCluster.java          | 11 +++++-----
 .../datamodel/DiskBalancerVolumeSet.java        |  9 ++++----
 .../diskbalancer/planner/GreedyPlanner.java     | 23 ++++++++++----------
 .../diskbalancer/planner/PlannerFactory.java    |  7 +++---
 8 files changed, 47 insertions(+), 41 deletions(-)
----------------------------------------------------------------------
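
For readers following the change: every file below applies the same pattern, replacing the commons-logging API with SLF4J and moving to parameterized messages. A minimal before/after sketch (the class name LogExample is hypothetical, not part of this commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogExample {
  // Before (commons-logging):
  //   private static final Log LOG = LogFactory.getLog(LogExample.class);
  // After (SLF4J):
  private static final Logger LOG =
      LoggerFactory.getLogger(LogExample.class);

  void report(java.net.URI clusterURI) {
    // {} placeholders are substituted lazily: the message string is only
    // built when the DEBUG level is actually enabled.
    LOG.debug("Cluster URI : {}", clusterURI);
  }
}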


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 81dbb2d..d5c402e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -60,7 +60,8 @@ import java.util.concurrent.locks.ReentrantLock;
 @InterfaceAudience.Private
 public class DiskBalancer {
 
-  private static final Log LOG = LogFactory.getLog(DiskBalancer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancer.class);
   private final FsDatasetSpi<?> dataset;
   private final String dataNodeUUID;
   private final BlockMover blockMover;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
index 040923a..484a64b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import java.io.IOException;
@@ -28,7 +28,8 @@ import java.net.URISyntaxException;
  * Connector factory creates appropriate connector based on the URL.
  */
 public final class ConnectorFactory {
-  static final Log LOG = LogFactory.getLog(ConnectorFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConnectorFactory.class);
 
   /**
    * Constructs an appropriate connector based on the URL.
@@ -37,13 +38,13 @@ public final class ConnectorFactory {
    */
   public static ClusterConnector getCluster(URI clusterURI, Configuration
       conf) throws IOException, URISyntaxException {
-    LOG.info("Cluster URI : " + clusterURI);
-    LOG.info("scheme : " + clusterURI.getScheme());
+    LOG.debug("Cluster URI : {}" , clusterURI);
+    LOG.debug("scheme : {}" , clusterURI.getScheme());
     if (clusterURI.getScheme().startsWith("file")) {
-      LOG.info("Creating a JsonNodeConnector");
+      LOG.debug("Creating a JsonNodeConnector");
       return new JsonNodeConnector(clusterURI.toURL());
     } else {
-      LOG.info("Creating NameNode connector");
+      LOG.debug("Creating NameNode connector");
       return new DBNameNodeConnector(clusterURI, conf);
     }
   }
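
Worth noting about the hunk above: with parameterized messages, the isDebugEnabled() guard that concatenation-based calls often needed becomes unnecessary. A sketch under the same assumptions (GuardExample is a hypothetical class):

import java.net.URI;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardExample.class);

  void log(URI clusterURI) {
    // commons-logging idiom: guard so the concatenated string is not
    // built when DEBUG is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("scheme : " + clusterURI.getScheme());
    }
    // SLF4J idiom: getScheme() is still evaluated eagerly, but the
    // message itself is formatted only when DEBUG is enabled.
    LOG.debug("scheme : {}", clusterURI.getScheme());
  }
}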

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
index c35e934..acf1fa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -42,7 +42,8 @@ import java.util.List;
  * given cluster.
  */
 class DBNameNodeConnector implements ClusterConnector {
-  static final Log LOG = LogFactory.getLog(DBNameNodeConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DBNameNodeConnector.class);
   static final Path DISKBALANCER_ID_PATH = new Path("/system/diskbalancer.id");
   private final URI clusterURI;
   private final NameNodeConnector connector;
@@ -159,4 +160,4 @@ class DBNameNodeConnector implements ClusterConnector {
     }
 
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
index bf5aebb..cc79648 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
@@ -33,7 +33,8 @@ import java.util.List;
  * A connector that understands JSON data cluster models.
  */
 public class JsonNodeConnector implements ClusterConnector {
-  static final Log LOG = LogFactory.getLog(JsonNodeConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonNodeConnector.class);
   private final URL clusterURI;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
index c86fc9a..7b82278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Planner;
@@ -66,7 +66,8 @@ import java.util.concurrent.Future;
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class DiskBalancerCluster {
 
-  static final Log LOG = LogFactory.getLog(DiskBalancerCluster.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerCluster.class);
   private final Set<String> exclusionList;
   private final Set<String> inclusionList;
   private ClusterConnector clusterConnector;
@@ -264,7 +265,7 @@ public class DiskBalancerCluster {
    */
   public void createOutPutDirectory() throws IOException {
     if (Files.exists(Paths.get(this.getOutput()))) {
-      LOG.fatal("An output directory already exists at this location. Path : " 
+
+      LOG.error("An output directory already exists at this location. Path : " 
+
           this.getOutput());
       throw new IOException(
           "An output directory already exists at this location. Path : " +
@@ -273,7 +274,7 @@ public class DiskBalancerCluster {
 
     File f = new File(this.getOutput());
     if (!f.mkdirs()) {
-      LOG.fatal("Unable to create the output directory. Path : " + this
+      LOG.error("Unable to create the output directory. Path : " + this
           .getOutput());
       throw new IOException(
           "Unable to create the output directory. Path : " + this.getOutput());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
index 49c8558..2faf249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
@@ -40,7 +40,8 @@ import java.util.UUID;
  */
 @JsonIgnoreProperties({"sortedQueue", "volumeCount", "idealUsed"})
 public class DiskBalancerVolumeSet {
-  static final Log LOG = LogFactory.getLog(DiskBalancerVolumeSet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolumeSet.class);
   private final int maxDisks = 256;
 
   @JsonProperty("transient")
@@ -172,7 +173,7 @@ public class DiskBalancerVolumeSet {
                                       volume.getStorageType(),
                                       volume.getUuid());
 
-    LOG.fatal(errMessage);
+    LOG.error(errMessage);
     volume.setSkip(true);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
index f0fc776..88ddca4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
@@ -42,7 +42,8 @@ public class GreedyPlanner implements Planner {
   public static final long MB = 1024L * 1024L;
   public static final long GB = MB * 1024L;
   public static final long TB = GB * 1024L;
-  static final Log LOG = LogFactory.getLog(GreedyPlanner.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(GreedyPlanner.class);
   private final float threshold;
 
   /**
@@ -108,13 +109,13 @@ public class GreedyPlanner implements Planner {
       if (!lowVolume.isSkip() && !highVolume.isSkip()) {
         nextStep = computeMove(currentSet, lowVolume, highVolume);
       } else {
-        LOG.debug("Skipping compute move. lowVolume :" + lowVolume.getPath());
-        LOG.debug("Skipping compute move. highVolume :" + 
highVolume.getPath());
+        LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}",
+            lowVolume.getPath(), highVolume.getPath());
       }
 
       applyStep(nextStep, currentSet, lowVolume, highVolume);
       if (nextStep != null) {
-        LOG.debug("Step : " + nextStep.toString());
+        LOG.debug("Step : {} ",  nextStep.toString());
         plan.addStep(nextStep);
       }
     }
@@ -179,9 +180,8 @@ public class GreedyPlanner implements Planner {
     // This disk cannot take any more data from any disk.
     // Remove it from our computation matrix.
     if (maxLowVolumeCanReceive <= 0) {
-      LOG.debug(lowVolume.getPath() +
-          " Skipping disk from computation. Maximum data size " +
-          "achieved.");
+      LOG.debug("{} Skipping disk from computation. Maximum data size " +
+          "achieved.", lowVolume.getPath());
       lowVolume.setSkip(true);
     }
 
@@ -191,9 +191,8 @@ public class GreedyPlanner implements Planner {
     // This volume cannot give any more data, remove it from the
     // computation matrix
     if (maxHighVolumeCanGive <= 0) {
-      LOG.debug(highVolume.getPath() +
-          " Skipping disk from computation. Minimum data size " +
-          "achieved.");
+      LOG.debug(" {} Skipping disk from computation. Minimum data size " +
+          "achieved.", highVolume.getPath());
       highVolume.setSkip(true);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53255c33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/PlannerFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/PlannerFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/PlannerFactory.java
index ae18e05..24f2970 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/PlannerFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/PlannerFactory.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 
@@ -25,7 +25,8 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
  * Returns a planner based on the user defined tags.
  */
 public final class PlannerFactory {
-  static final Log LOG = LogFactory.getLog(PlannerFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlannerFactory.class);
 
   public static final String GREEDY_PLANNER = "greedyPlanner";
 
