[13/37] hadoop git commit: HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in Datanode. Contributed by Mukul Kumar Singh.

2018-11-06 Thread brahma
HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in 
Datanode. Contributed by Mukul Kumar Singh.
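
The cost the patch avoids: ByteString.toByteArray() allocates a fresh byte[]
and copies the entire chunk payload into it, whereas
ByteString.asReadOnlyByteBuffer() wraps the existing backing bytes in a
read-only java.nio.ByteBuffer with no copy. A minimal standalone sketch of the
difference (not part of the commit itself; assumes the ratis-shaded protobuf
ByteString that the patch switches to):

    import java.nio.ByteBuffer;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    public class ByteStringCopyDemo {
      public static void main(String[] args) {
        // Stand-in for a WriteChunk payload (16 MiB).
        ByteString payload = ByteString.copyFrom(new byte[16 << 20]);

        // O(n): allocates a new 16 MiB array and copies every byte.
        byte[] copied = payload.toByteArray();

        // O(1): wraps the same backing storage; read-only, which is fine
        // because the datanode only reads the payload to write it to disk.
        ByteBuffer view = payload.asReadOnlyByteBuffer();

        System.out.println(copied.length + " == " + view.remaining());
      }
    }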


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/942693bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/942693bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/942693bd

Branch: refs/heads/HDFS-13891
Commit: 942693bddd5fba51b85a5f677e3496a41817cff3
Parents: c8ca174
Author: Shashikant Banerjee 
Authored: Mon Nov 5 23:43:22 2018 +0530
Committer: Shashikant Banerjee 
Committed: Mon Nov 5 23:43:22 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java | 11 +++---
 .../container/keyvalue/helpers/ChunkUtils.java  | 28 ---
 .../keyvalue/impl/ChunkManagerImpl.java |  2 +-
 .../keyvalue/interfaces/ChunkManager.java   |  3 +-
 .../keyvalue/TestChunkManagerImpl.java  | 37 ++--
 .../common/impl/TestContainerPersistence.java   | 28 ++-
 6 files changed, 62 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/942693bd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4cb23ed..1271d99 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -76,7 +77,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import static org.apache.hadoop.hdds.HddsConfigKeys
     .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
@@ -652,10 +653,10 @@ public class KeyValueHandler extends Handler {
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
       Preconditions.checkNotNull(chunkInfo);
 
-      byte[] data = null;
+      ByteBuffer data = null;
       if (request.getWriteChunk().getStage() == Stage.WRITE_DATA ||
           request.getWriteChunk().getStage() == Stage.COMBINED) {
-        data = request.getWriteChunk().getData().toByteArray();
+        data = request.getWriteChunk().getData().asReadOnlyByteBuffer();
       }
 
       chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data,
@@ -713,7 +714,7 @@ public class KeyValueHandler extends Handler {
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
           putSmallFileReq.getChunkInfo());
       Preconditions.checkNotNull(chunkInfo);
-      byte[] data = putSmallFileReq.getData().toByteArray();
+      ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer();
       // chunks will be committed as a part of handling putSmallFile
       // here. There is no need to maintain this info in openContainerBlockMap.
       chunkManager.writeChunk(
@@ -724,7 +725,7 @@ public class KeyValueHandler extends Handler {
       blockData.setChunks(chunks);
       // TODO: add bcsId as a part of putSmallFile transaction
       blockManager.putBlock(kvContainer, blockData);
-      metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
+      metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity());
 
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
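
A subtlety in the hunks above: the metrics call changes from data.length to
data.capacity(). For a buffer obtained from asReadOnlyByteBuffer(), capacity()
equals the ByteString size and, unlike remaining(), does not shrink as the
write path consumes the buffer, so the byte count recorded after writeChunk()
is still the full payload size. A small check of that property (a sketch, not
from the commit):

    import java.nio.ByteBuffer;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    public class CapacityVsRemaining {
      public static void main(String[] args) {
        ByteString bs = ByteString.copyFromUtf8("hello world");
        ByteBuffer buf = bs.asReadOnlyByteBuffer();
        // Fresh view: position 0 and limit == capacity == bs.size().
        System.out.println(buf.capacity() == bs.size());   // true
        buf.position(buf.limit());  // simulate the write path consuming the buffer
        System.out.println(buf.remaining());                // 0
        System.out.println(buf.capacity() == bs.size());    // still true
      }
    }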

http://git-wip-us.apache.org/repos/asf/hadoop/blob/942693bd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 20598d9..718f5de 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++

hadoop git commit: HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in Datanode. Contributed by Mukul Kumar Singh.

2018-11-05 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 53d4aefae -> 4b0004488


HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in 
Datanode. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b000448
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b000448
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b000448

Branch: refs/heads/ozone-0.3
Commit: 4b00044883f5d00eea99ee885fff0761d9b6392e
Parents: 53d4aef
Author: Shashikant Banerjee 
Authored: Tue Nov 6 00:00:23 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 6 00:00:23 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java | 11 +++---
 .../container/keyvalue/helpers/ChunkUtils.java  | 30 
 .../keyvalue/impl/ChunkManagerImpl.java |  2 +-
 .../keyvalue/interfaces/ChunkManager.java   |  3 +-
 .../keyvalue/TestChunkManagerImpl.java  | 37 ++--
 .../common/impl/TestContainerPersistence.java   | 28 ++-
 6 files changed, 63 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b000448/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 7c859d4..2377cd6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -76,7 +77,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import static org.apache.hadoop.hdds.HddsConfigKeys
     .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -668,10 +669,10 @@ public class KeyValueHandler extends Handler {
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
       Preconditions.checkNotNull(chunkInfo);
 
-      byte[] data = null;
+      ByteBuffer data = null;
       if (request.getWriteChunk().getStage() == Stage.WRITE_DATA ||
           request.getWriteChunk().getStage() == Stage.COMBINED) {
-        data = request.getWriteChunk().getData().toByteArray();
+        data = request.getWriteChunk().getData().asReadOnlyByteBuffer();
       }
 
       chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data,
@@ -729,7 +730,7 @@ public class KeyValueHandler extends Handler {
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
           putSmallFileReq.getChunkInfo());
       Preconditions.checkNotNull(chunkInfo);
-      byte[] data = putSmallFileReq.getData().toByteArray();
+      ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer();
       // chunks will be committed as a part of handling putSmallFile
       // here. There is no need to maintain this info in openContainerBlockMap.
       chunkManager.writeChunk(
@@ -740,7 +741,7 @@ public class KeyValueHandler extends Handler {
       blockData.setChunks(chunks);
       // TODO: add bcsId as a part of putSmallFile transaction
       blockManager.putBlock(kvContainer, blockData);
-      metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
+      metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity());
 
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
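
The ChunkUtils and ChunkManager side of this commit is truncated in this
digest, but the diffstat shows the ChunkManager interface moving from byte[]
to ByteBuffer as well. A hypothetical sketch of how such a payload can reach
disk without an intermediate array (class and method names here are
illustrative only, not taken from the commit):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    final class ChunkWriteSketch {
      // Illustrative only: writes the buffer at the given offset via NIO.
      // FileChannel.write(ByteBuffer) reads straight from the buffer, so the
      // read-only protobuf view never has to be copied into a byte[].
      static void writeChunk(Path chunkFile, long offset, ByteBuffer data)
          throws IOException {
        try (FileChannel channel = FileChannel.open(chunkFile,
            StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
          channel.position(offset);
          while (data.hasRemaining()) {
            channel.write(data);  // may take several calls for large buffers
          }
        }
      }
    }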

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b000448/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 492a286..dc44dc5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++
