HDFS-10560. DiskBalancer: Reuse ObjectMapper instance to improve performance. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b047bc72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b047bc72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b047bc72

Branch: refs/heads/YARN-2915
Commit: b047bc7270f3461156e4d08423c728ee9c67dba5
Parents: b427ce1
Author: Anu Engineer <aengin...@apache.org>
Authored: Tue Aug 16 10:20:08 2016 -0700
Committer: Anu Engineer <aengin...@apache.org>
Committed: Tue Aug 16 10:20:08 2016 -0700

----------------------------------------------------------------------
 .../server/datanode/DiskBalancerWorkItem.java   | 11 ++++++---
 .../server/datanode/DiskBalancerWorkStatus.java | 26 +++++++++++---------
 .../hdfs/server/datanode/DiskBalancer.java      |  5 ++--
 .../server/diskbalancer/command/Command.java    |  6 +++--
 .../connectors/JsonNodeConnector.java           |  8 +++---
 .../datamodel/DiskBalancerCluster.java          | 11 ++++++---
 .../datamodel/DiskBalancerVolume.java           | 11 ++++++---
 7 files changed, 46 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
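
The pattern this patch applies throughout: Jackson's ObjectMapper (the Jackson 1.x org.codehaus.jackson variant used here) is thread-safe once configured, so each class keeps one shared mapper, or an ObjectReader pre-bound to its target type, instead of constructing a fresh mapper on every parseJson/toJson call. A minimal before-and-after sketch of the idea; the WorkItem class and its field are illustrative, not taken from the patch:

import java.io.IOException;

import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;

public class WorkItem {
  // Created once per class: ObjectMapper is thread-safe after
  // configuration, and ObjectReader is immutable.
  private static final ObjectMapper MAPPER = new ObjectMapper();
  private static final ObjectReader READER =
      new ObjectMapper().reader(WorkItem.class);

  private long bytesCopied;           // illustrative bean property

  public long getBytesCopied() { return bytesCopied; }
  public void setBytesCopied(long bytesCopied) { this.bytesCopied = bytesCopied; }

  // Before: a new mapper built on every call.
  public static WorkItem parseJsonOld(String json) throws IOException {
    return new ObjectMapper().readValue(json, WorkItem.class);
  }

  // After: reuse the statically initialized reader.
  public static WorkItem parseJson(String json) throws IOException {
    return READER.readValue(json);
  }

  public String toJson() throws IOException {
    return MAPPER.writeValueAsString(this);
  }
}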


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
index f46a987..592a89f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.htrace.fasterxml.jackson.annotation.JsonInclude;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 
 import java.io.IOException;
 
@@ -34,6 +35,10 @@ import java.io.IOException;
 @InterfaceStability.Unstable
 @JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class DiskBalancerWorkItem {
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+  private static final ObjectReader READER =
+      new ObjectMapper().reader(DiskBalancerWorkItem.class);
+
   private  long startTime;
   private long secondsElapsed;
   private long bytesToCopy;
@@ -74,8 +79,7 @@ public class DiskBalancerWorkItem {
    */
   public static DiskBalancerWorkItem parseJson(String json) throws IOException {
     Preconditions.checkNotNull(json);
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(json, DiskBalancerWorkItem.class);
+    return READER.readValue(json);
   }
 
   /**
@@ -169,8 +173,7 @@ public class DiskBalancerWorkItem {
    * @throws IOException
    */
   public String toJson() throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(this);
+    return MAPPER.writeValueAsString(this);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
index 14789b6..94bf6a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
@@ -24,6 +24,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 import org.codehaus.jackson.map.SerializationConfig;
 
 import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;
@@ -38,6 +39,15 @@ import java.util.LinkedList;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class DiskBalancerWorkStatus {
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+  private static final ObjectMapper MAPPER_WITH_INDENT_OUTPUT =
+      new ObjectMapper().enable(
+          SerializationConfig.Feature.INDENT_OUTPUT);
+  private static final ObjectReader READER_WORKSTATUS =
+      new ObjectMapper().reader(DiskBalancerWorkStatus.class);
+  private static final ObjectReader READER_WORKENTRY = new ObjectMapper()
+      .reader(defaultInstance().constructCollectionType(List.class,
+          DiskBalancerWorkEntry.class));
 
   private final List<DiskBalancerWorkEntry> currentState;
   private Result result;
@@ -92,10 +102,7 @@ public class DiskBalancerWorkStatus {
     this.result = result;
     this.planID = planID;
     this.planFile = planFile;
-    ObjectMapper mapper = new ObjectMapper();
-    this.currentState = mapper.readValue(currentState,
-        defaultInstance().constructCollectionType(
-            List.class, DiskBalancerWorkEntry.class));
+    this.currentState = READER_WORKENTRY.readValue(currentState);
   }
 
 
@@ -141,15 +148,11 @@ public class DiskBalancerWorkStatus {
    * @throws IOException
    **/
   public String currentStateString() throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
-    return mapper.writeValueAsString(currentState);
+    return MAPPER_WITH_INDENT_OUTPUT.writeValueAsString(currentState);
   }
 
   public String toJsonString() throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(this);
-
+    return MAPPER.writeValueAsString(this);
   }
 
   /**
@@ -160,8 +163,7 @@ public class DiskBalancerWorkStatus {
    */
   public static DiskBalancerWorkStatus parseJson(String json) throws
       IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(json, DiskBalancerWorkStatus.class);
+    return READER_WORKSTATUS.readValue(json);
   }
 
 

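For the collection-valued currentState field above, the reusable reader is pre-bound to a generic List type built through Jackson's TypeFactory, so the element type survives reuse across calls. A sketch of that variant; Entry is a hypothetical element type standing in for DiskBalancerWorkEntry:

import java.io.IOException;
import java.util.List;

import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;

import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;

public class WorkEntryListParser {
  // Reader bound once to List<Entry>, then reused by every caller.
  private static final ObjectReader LIST_READER = new ObjectMapper()
      .reader(defaultInstance().constructCollectionType(List.class,
          Entry.class));

  public static List<Entry> parse(String json) throws IOException {
    // e.g. json = "[{\"path\":\"/data/disk1\"}]"
    return LIST_READER.readValue(json);
  }

  // Hypothetical element type standing in for DiskBalancerWorkEntry.
  public static class Entry {
    private String path;
    public String getPath() { return path; }
    public void setPath(String path) { this.path = path; }
  }
}
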
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 523c0a6..ec72d97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
+import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.util.Time;
-import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -263,8 +263,7 @@ public class DiskBalancer {
       for (Map.Entry<String, FsVolumeSpi> entry : volMap.entrySet()) {
         pathMap.put(entry.getKey(), entry.getValue().getBasePath());
       }
-      ObjectMapper mapper = new ObjectMapper();
-      return mapper.writeValueAsString(pathMap);
+      return JsonUtil.toJsonString(pathMap);
     } catch (DiskBalancerException ex) {
       throw ex;
     } catch (IOException e) {

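Where the serialized value is a plain map, the patch instead delegates to the existing toJsonString helper in org.apache.hadoop.hdfs.web.JsonUtil that the new import brings in, so the class holds no mapper at all. A minimal usage sketch, assuming only the overload the patch itself calls; the map entries are illustrative:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.web.JsonUtil;

public class VolumePathReport {
  public static String volumePathsToJson() throws IOException {
    Map<String, String> pathMap = new HashMap<>();
    pathMap.put("volume-uuid-1", "/data/disk1");   // illustrative entries
    pathMap.put("volume-uuid-2", "/data/disk2");
    // Serialize through Hadoop's shared JSON helper instead of a
    // per-call ObjectMapper.
    return JsonUtil.toJsonString(pathMap);
  }
}
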
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 3110c1a..a1c15ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,6 +70,8 @@ import java.util.TreeSet;
  * Common interface for command handling.
  */
 public abstract class Command extends Configured {
+  private static final ObjectReader READER =
+      new ObjectMapper().reader(HashMap.class);
   static final Logger LOG = LoggerFactory.getLogger(Command.class);
   private Map<String, String> validArgs = new HashMap<>();
   private URI clusterURI;
@@ -441,11 +444,10 @@ public abstract class Command extends Configured {
     ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
     String volumeNameJson = dnClient.getDiskBalancerSetting(
         DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
-    ObjectMapper mapper = new ObjectMapper();
 
     @SuppressWarnings("unchecked")
     Map<String, String> volumeMap =
-        mapper.readValue(volumeNameJson, HashMap.class);
+        READER.readValue(volumeNameJson);
     for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
       for (DiskBalancerVolume vol : set.getVolumes()) {
         if (volumeMap.containsKey(vol.getUuid())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
index cc79648..b47beff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
 import com.google.common.base.Preconditions;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 
 import java.io.File;
 import java.net.URL;
@@ -35,6 +37,8 @@ import java.util.List;
 public class JsonNodeConnector implements ClusterConnector {
   private static final Logger LOG =
       LoggerFactory.getLogger(JsonNodeConnector.class);
+  private static final ObjectReader READER =
+      new ObjectMapper().reader(DiskBalancerCluster.class);
   private final URL clusterURI;
 
   /**
@@ -56,9 +60,7 @@ public class JsonNodeConnector implements ClusterConnector {
     Preconditions.checkNotNull(this.clusterURI);
     String dataFilePath = this.clusterURI.getPath();
     LOG.info("Reading cluster info from file : " + dataFilePath);
-    ObjectMapper mapper = new ObjectMapper();
-    DiskBalancerCluster cluster =
-        mapper.readValue(new File(dataFilePath), DiskBalancerCluster.class);
+    DiskBalancerCluster cluster = READER.readValue(new File(dataFilePath));
     String message = String.format("Found %d node(s)",
         cluster.getNodes().size());
     LOG.info(message);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
index 17a6ebb..8d7fb2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -25,9 +26,11 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Planner;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.PlannerFactory;
+import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 
 import java.io.File;
 import java.io.IOException;
@@ -69,6 +72,8 @@ public class DiskBalancerCluster {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(DiskBalancerCluster.class);
+  private static final ObjectReader READER =
+      new ObjectMapper().reader(DiskBalancerCluster.class);
   private final Set<String> exclusionList;
   private final Set<String> inclusionList;
   private ClusterConnector clusterConnector;
@@ -118,8 +123,7 @@ public class DiskBalancerCluster {
    * @throws IOException
    */
   public static DiskBalancerCluster parseJson(String json) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(json, DiskBalancerCluster.class);
+    return READER.readValue(json);
   }
 
   /**
@@ -232,8 +236,7 @@ public class DiskBalancerCluster {
    * @throws IOException
    */
   public String toJson() throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(this);
+    return JsonUtil.toJsonString(this);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b047bc72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a6a8bdc..8b627b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -19,9 +19,11 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 
 import com.google.common.base.Preconditions;
 
+import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 
 import java.io.IOException;
 
@@ -30,6 +32,9 @@ import java.io.IOException;
  */
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class DiskBalancerVolume {
+  private static final ObjectReader READER =
+      new ObjectMapper().reader(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -58,8 +63,7 @@ public class DiskBalancerVolume {
    * @throws IOException
    */
   public static DiskBalancerVolume parseJson(String json) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(json, DiskBalancerVolume.class);
+    return READER.readValue(json);
   }
 
   /**
@@ -305,8 +309,7 @@ public class DiskBalancerVolume {
    * @throws IOException
    */
   public String toJson() throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(this);
+    return JsonUtil.toJsonString(this);
   }
 
   /**

