hadoop git commit: HDDS-708. Validate BCSID while reading blocks from containers in datanodes. Contributed by Shashikant Banerjee.

2018-10-23 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0b62983c5 -> b61846392


HDDS-708. Validate BCSID while reading blocks from containers in datanodes. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6184639
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6184639
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6184639

Branch: refs/heads/trunk
Commit: b61846392ee589a0217e95defbfa824f29088057
Parents: 0b62983
Author: Shashikant Banerjee 
Authored: Tue Oct 23 16:52:17 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Oct 23 16:52:17 2018 +0530

--
 .../scm/storage/ContainerProtocolCalls.java |  7 ++-
 .../main/proto/DatanodeContainerProtocol.proto  |  5 +-
 .../container/keyvalue/KeyValueHandler.java |  8 ++-
 .../keyvalue/impl/BlockManagerImpl.java | 40 
 .../keyvalue/interfaces/BlockManager.java   |  4 +-
 .../keyvalue/TestBlockManagerImpl.java  |  6 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  3 +-
 .../container/TestContainerReplication.java |  2 +-
 .../common/impl/TestCloseContainerHandler.java  |  2 +-
 .../common/impl/TestContainerPersistence.java   | 64 +++-
 .../hadoop/ozone/web/client/TestKeys.java   |  4 +-
 11 files changed, 116 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6184639/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 278b129..9bf0241 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -81,14 +81,17 @@ public final class ContainerProtocolCalls  {
* @param xceiverClient client to perform call
* @param datanodeBlockID blockID to identify container
* @param traceID container protocol call args
+   * @param blockCommitSequenceId latest commit Id of the block
* @return container protocol get block response
* @throws IOException if there is an I/O error while performing the call
*/
   public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
-  DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
+  DatanodeBlockID datanodeBlockID, String traceID,
+  long blockCommitSequenceId) throws IOException {
 GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
 .newBuilder()
-.setBlockID(datanodeBlockID);
+.setBlockID(datanodeBlockID)
+.setBlockCommitSequenceId(blockCommitSequenceId);
 String id = xceiverClient.getPipeline().getLeader().getUuidString();
 
 ContainerCommandRequestProto request = ContainerCommandRequestProto
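
The new overload threads the expected BCSID through to the datanode. A minimal
caller-side sketch, assuming the package locations implied by the file paths in
this thread; the helper name readBlockWithBcsidCheck is hypothetical, and the
expected BCSID would typically come from the block's commit metadata
(PutBlock/GetCommittedBlockLength):

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

public final class BcsidAwareBlockReader {

  // Hypothetical helper: issues GetBlock with the BCSID the client expects,
  // so a replica that has not yet applied that commit rejects the read
  // instead of silently serving stale or missing data.
  public static GetBlockResponseProto readBlockWithBcsidCheck(
      XceiverClientSpi client, DatanodeBlockID blockId, String traceID,
      long expectedBcsid) throws IOException {
    return ContainerProtocolCalls.getBlock(client, blockId, traceID,
        expectedBcsid);
  }

  private BcsidAwareBlockReader() {
  }
}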

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6184639/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index da55db3..f9262ba 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -140,6 +140,8 @@ enum Result {
   UNKNOWN_CONTAINER_TYPE = 34;
   BLOCK_NOT_COMMITTED = 35;
   CONTAINER_UNHEALTHY = 36;
+  UNKNOWN_BCSID = 37;
+  BCSID_MISMATCH = 38;
 }
 
 /**
@@ -315,6 +317,7 @@ message  PutBlockResponseProto {
 
 message  GetBlockRequestProto  {
   required DatanodeBlockID blockID = 1;
+  optional uint64 blockCommitSequenceId = 2 [default = 0];
 }
 
 message  GetBlockResponseProto  {
@@ -333,7 +336,7 @@ message  GetCommittedBlockLengthRequestProto {
 message  GetCommittedBlockLengthResponseProto {
   required DatanodeBlockID blockID = 1;
   required int64 blockLength = 2;
-  optional uint64 blockCommitSequenceId = 3;
+  optional uint64 blockCommitSequenceId = 3 [default = 0];
 }
 
 message   DeleteBlockResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6184639/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/had
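
The KeyValueHandler and BlockManagerImpl changes that perform the datanode-side
enforcement are cut off above. Roughly, the datanode compares the BCSID carried
by the request against what the container and the block have committed and
fails the read with one of the two new result codes. A simplified illustration,
not the committed code (validateBcsid and the argument names are made up for
this sketch):

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;

public final class BcsidValidator {

  // requestedBcsid == 0 means the caller did not ask for a check (the proto
  // default added above), so legacy clients keep working.
  public static Result validateBcsid(long requestedBcsid, long containerBcsid,
      long blockBcsid) {
    if (requestedBcsid == 0) {
      return Result.SUCCESS;
    }
    if (requestedBcsid > containerBcsid) {
      // The container has not yet applied a transaction with this BCSID.
      return Result.UNKNOWN_BCSID;
    }
    if (requestedBcsid != blockBcsid) {
      // The block exists, but was committed under a different BCSID.
      return Result.BCSID_MISMATCH;
    }
    return Result.SUCCESS;
  }

  private BcsidValidator() {
  }
}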

hadoop git commit: HDDS-708. Validate BCSID while reading blocks from containers in datanodes. Contributed by Shashikant Banerjee.

2018-10-23 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 51d2273e1 -> 0674f11fc


HDDS-708. Validate BCSID while reading blocks from containers in datanodes. 
Contributed by Shashikant Banerjee.

(cherry picked from commit b61846392ee589a0217e95defbfa824f29088057)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0674f11f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0674f11f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0674f11f

Branch: refs/heads/ozone-0.3
Commit: 0674f11fc0fd00275f1860cfdf3f877205046293
Parents: 51d2273
Author: Shashikant Banerjee 
Authored: Tue Oct 23 16:52:17 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Oct 23 17:01:24 2018 +0530

--
 .../scm/storage/ContainerProtocolCalls.java |  7 ++-
 .../main/proto/DatanodeContainerProtocol.proto  |  5 +-
 .../container/keyvalue/KeyValueHandler.java |  8 ++-
 .../keyvalue/impl/BlockManagerImpl.java | 40 
 .../keyvalue/interfaces/BlockManager.java   |  4 +-
 .../keyvalue/TestBlockManagerImpl.java  |  6 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  3 +-
 .../container/TestContainerReplication.java |  2 +-
 .../common/impl/TestCloseContainerHandler.java  |  2 +-
 .../common/impl/TestContainerPersistence.java   | 64 +++-
 .../hadoop/ozone/web/client/TestKeys.java   |  4 +-
 11 files changed, 116 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0674f11f/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 278b129..9bf0241 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -81,14 +81,17 @@ public final class ContainerProtocolCalls  {
* @param xceiverClient client to perform call
* @param datanodeBlockID blockID to identify container
* @param traceID container protocol call args
+   * @param blockCommitSequenceId latest commit Id of the block
* @return container protocol get block response
* @throws IOException if there is an I/O error while performing the call
*/
   public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
-  DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
+  DatanodeBlockID datanodeBlockID, String traceID,
+  long blockCommitSequenceId) throws IOException {
 GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
 .newBuilder()
-.setBlockID(datanodeBlockID);
+.setBlockID(datanodeBlockID)
+.setBlockCommitSequenceId(blockCommitSequenceId);
 String id = xceiverClient.getPipeline().getLeader().getUuidString();
 
 ContainerCommandRequestProto request = ContainerCommandRequestProto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0674f11f/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index da55db3..f9262ba 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -140,6 +140,8 @@ enum Result {
   UNKNOWN_CONTAINER_TYPE = 34;
   BLOCK_NOT_COMMITTED = 35;
   CONTAINER_UNHEALTHY = 36;
+  UNKNOWN_BCSID = 37;
+  BCSID_MISMATCH = 38;
 }
 
 /**
@@ -315,6 +317,7 @@ message  PutBlockResponseProto {
 
 message  GetBlockRequestProto  {
   required DatanodeBlockID blockID = 1;
+  optional uint64 blockCommitSequenceId = 2 [default = 0];
 }
 
 message  GetBlockResponseProto  {
@@ -333,7 +336,7 @@ message  GetCommittedBlockLengthRequestProto {
 message  GetCommittedBlockLengthResponseProto {
   required DatanodeBlockID blockID = 1;
   required int64 blockLength = 2;
-  optional uint64 blockCommitSequenceId = 3;
+  optional uint64 blockCommitSequenceId = 3 [default = 0];
 }
 
 message   DeleteBlockResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0674f11f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyva

hadoop git commit: HDDS-708. Validate BCSID while reading blocks from containers in datanodes. Contributed by Shashikant Banerjee.

2018-10-23 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 346afb0a5 -> a492edee4


HDDS-708. Validate BCSID while reading blocks from containers in datanodes. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a492edee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a492edee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a492edee

Branch: refs/heads/ozone-0.3
Commit: a492edee4b84e38552f185a4227b49abf60f762d
Parents: 346afb0
Author: Shashikant Banerjee 
Authored: Tue Oct 23 19:02:12 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Oct 23 19:02:12 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  | 194 +--
 .../hadoop/hdds/scm/XceiverClientManager.java   |  14 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |   6 +
 .../scm/container/common/helpers/Pipeline.java  |   4 +
 .../ozone/client/io/ChunkGroupInputStream.java  |   9 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   4 +
 .../hadoop/ozone/TestMiniOzoneCluster.java  |   2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java| 107 ++
 .../ozone/scm/TestXceiverClientManager.java |  15 +-
 9 files changed, 285 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a492edee/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 2f11872..9526be3 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
@@ -40,6 +41,9 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
+import java.util.UUID;
+import java.util.Map;
+import java.util.HashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 
@@ -50,9 +54,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
   private final Pipeline pipeline;
   private final Configuration config;
-  private XceiverClientProtocolServiceStub asyncStub;
+  private Map<UUID, XceiverClientProtocolServiceStub> asyncStubs;
   private XceiverClientMetrics metrics;
-  private ManagedChannel channel;
+  private Map<UUID, ManagedChannel> channels;
   private final Semaphore semaphore;
   private boolean closed = false;
 
@@ -72,46 +76,62 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 this.semaphore =
 new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
 this.metrics = XceiverClientManager.getXceiverClientMetrics();
+this.channels = new HashMap<>();
+this.asyncStubs = new HashMap<>();
   }
 
   @Override
   public void connect() throws Exception {
+
+// leader by default is the 1st datanode in the datanode list of pipeline
 DatanodeDetails leader = this.pipeline.getLeader();
+// just make a connection to the 1st datanode at the beginning
+connectToDatanode(leader);
+  }
 
+  private void connectToDatanode(DatanodeDetails dn) {
 // read port from the data node, on failure use default configured
 // port.
-int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
+int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
 if (port == 0) {
   port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
   OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
 }
-LOG.debug("Connecting to server Port : " + leader.getIpAddress());
-channel = NettyChannelBuilder.forAddress(leader.getIpAddress(), port)
-.usePlaintext()
-.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
-.build();
-asyncStub = XceiverClientProtocolServiceGrpc.newStub(channel);
+LOG.debug("Connecting to server Port : " + dn.getIpAddr

hadoop git commit: Revert "HDDS-708. Validate BCSID while reading blocks from containers in datanodes. Contributed by Shashikant Banerjee."

2018-10-23 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 a492edee4 -> 575613ef7


Revert "HDDS-708. Validate BCSID while reading blocks from containers in 
datanodes. Contributed by Shashikant Banerjee."

This reverts commit a492edee4b84e38552f185a4227b49abf60f762d.
The commit msg is wrong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/575613ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/575613ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/575613ef

Branch: refs/heads/ozone-0.3
Commit: 575613ef7a051d3a26ef8d895ef10498cb71d667
Parents: a492ede
Author: Shashikant Banerjee 
Authored: Tue Oct 23 19:14:18 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Oct 23 19:14:18 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  | 194 ++-
 .../hadoop/hdds/scm/XceiverClientManager.java   |  14 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |   6 -
 .../scm/container/common/helpers/Pipeline.java  |   4 -
 .../ozone/client/io/ChunkGroupInputStream.java  |   9 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   4 -
 .../hadoop/ozone/TestMiniOzoneCluster.java  |   2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java| 107 --
 .../ozone/scm/TestXceiverClientManager.java |  15 +-
 9 files changed, 70 insertions(+), 285 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/575613ef/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 9526be3..2f11872 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
@@ -41,9 +40,6 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
-import java.util.UUID;
-import java.util.Map;
-import java.util.HashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 
@@ -54,9 +50,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
   private final Pipeline pipeline;
   private final Configuration config;
-  private Map<UUID, XceiverClientProtocolServiceStub> asyncStubs;
+  private XceiverClientProtocolServiceStub asyncStub;
   private XceiverClientMetrics metrics;
-  private Map<UUID, ManagedChannel> channels;
+  private ManagedChannel channel;
   private final Semaphore semaphore;
   private boolean closed = false;
 
@@ -76,62 +72,46 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 this.semaphore =
 new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
 this.metrics = XceiverClientManager.getXceiverClientMetrics();
-this.channels = new HashMap<>();
-this.asyncStubs = new HashMap<>();
   }
 
   @Override
   public void connect() throws Exception {
-
-// leader by default is the 1st datanode in the datanode list of pipeline
 DatanodeDetails leader = this.pipeline.getLeader();
-// just make a connection to the 1st datanode at the beginning
-connectToDatanode(leader);
-  }
 
-  private void connectToDatanode(DatanodeDetails dn) {
 // read port from the data node, on failure use default configured
 // port.
-int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
+int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
 if (port == 0) {
   port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
   OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
 }
-LOG.debug("Connecting to server Port : " + dn.getIpAddress());
-ManagedChannel channel =
-NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext()
-
.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
-.build();
-Xceiver

hadoop git commit: HDDS-676. Enable Read from open Containers via Standalone Protocol. Contributed by Shashikant Banerjee.

2018-10-23 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 575613ef7 -> 3191afa12


HDDS-676. Enable Read from open Containers via Standalone Protocol. Contributed 
by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3191afa1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3191afa1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3191afa1

Branch: refs/heads/ozone-0.3
Commit: 3191afa12134cb85ff3af68d50906a352b4b6979
Parents: 575613e
Author: Shashikant Banerjee 
Authored: Tue Oct 23 19:19:58 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Oct 23 19:19:58 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  | 194 +--
 .../hadoop/hdds/scm/XceiverClientManager.java   |  14 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |   6 +
 .../scm/container/common/helpers/Pipeline.java  |   4 +
 .../ozone/client/io/ChunkGroupInputStream.java  |   9 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   4 +
 .../hadoop/ozone/TestMiniOzoneCluster.java  |   2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java| 107 ++
 .../ozone/scm/TestXceiverClientManager.java |  15 +-
 9 files changed, 285 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3191afa1/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 2f11872..9526be3 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
@@ -40,6 +41,9 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
+import java.util.UUID;
+import java.util.Map;
+import java.util.HashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 
@@ -50,9 +54,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
   private final Pipeline pipeline;
   private final Configuration config;
-  private XceiverClientProtocolServiceStub asyncStub;
+  private Map<UUID, XceiverClientProtocolServiceStub> asyncStubs;
   private XceiverClientMetrics metrics;
-  private ManagedChannel channel;
+  private Map<UUID, ManagedChannel> channels;
   private final Semaphore semaphore;
   private boolean closed = false;
 
@@ -72,46 +76,62 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 this.semaphore =
 new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
 this.metrics = XceiverClientManager.getXceiverClientMetrics();
+this.channels = new HashMap<>();
+this.asyncStubs = new HashMap<>();
   }
 
   @Override
   public void connect() throws Exception {
+
+// leader by default is the 1st datanode in the datanode list of pipeline
 DatanodeDetails leader = this.pipeline.getLeader();
+// just make a connection to the 1st datanode at the beginning
+connectToDatanode(leader);
+  }
 
+  private void connectToDatanode(DatanodeDetails dn) {
 // read port from the data node, on failure use default configured
 // port.
-int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
+int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
 if (port == 0) {
   port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
   OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
 }
-LOG.debug("Connecting to server Port : " + leader.getIpAddress());
-channel = NettyChannelBuilder.forAddress(leader.getIpAddress(), port)
-.usePlaintext()
-.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
-.build();
-asyncStub = XceiverClientProtocolServiceGrpc.newStub(channel);
+LOG.debug("Connecting to server Port : " + dn.getIpAddr
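
The substance of the change is replacing the single leader channel/stub pair
with per-datanode maps keyed by datanode UUID, so reads over the standalone
(gRPC) protocol can be routed to any replica while only the leader is dialed at
connect time. A simplified sketch of that connection-cache pattern; the
Connection type below is a placeholder for the real ManagedChannel plus async
stub, not the actual XceiverClientGrpc code:

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

public class PerDatanodeConnectionCache {

  // Placeholder for the channel + async stub kept per datanode.
  public static final class Connection {
    final String host;
    final int port;
    Connection(String host, int port) {
      this.host = host;
      this.port = port;
    }
  }

  // Mirrors the channels/asyncStubs maps in the diff above.
  private final Map<UUID, Connection> connections = new ConcurrentHashMap<>();

  // Dial lazily: the pipeline leader is connected up front, other replicas
  // the first time a read is routed to them.
  public Connection getOrConnect(UUID datanodeUuid, String host, int port) {
    return connections.computeIfAbsent(datanodeUuid,
        uuid -> new Connection(host, port));
  }

  public void closeAll() {
    // The real implementation would also shut down each channel here.
    connections.clear();
  }
}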

hadoop git commit: Revert "HDDS-579. ContainerStateMachine should fail subsequent transactions per container in case one fails. Contributed by Shashikant Banerjee."

2018-10-24 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 3191afa12 -> 27bbbd0f9


Revert "HDDS-579. ContainerStateMachine should fail subsequent transactions per 
container in case one fails. Contributed by Shashikant Banerjee."

This reverts commit a619d120a6c44bde2a846d61505a94f896e58e46.
This change introduces a new state for container replica called UNHEALTHY, 
which is not handled in SCM currently.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27bbbd0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27bbbd0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27bbbd0f

Branch: refs/heads/ozone-0.3
Commit: 27bbbd0f9df0acba2ceb0d2acb8c8646cc10aa3f
Parents: 3191afa
Author: Shashikant Banerjee 
Authored: Wed Oct 24 15:05:50 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Oct 24 15:05:50 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |   8 +-
 .../container/common/impl/HddsDispatcher.java   |  63 ++-
 .../container/keyvalue/KeyValueHandler.java |  20 +-
 .../StorageContainerDatanodeProtocol.proto  |   1 -
 .../rpc/TestContainerStateMachineFailures.java  | 185 ---
 5 files changed, 33 insertions(+), 244 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27bbbd0f/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index f9262ba..42be933 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -139,9 +139,8 @@ enum Result {
   CONTAINER_CHECKSUM_ERROR = 33;
   UNKNOWN_CONTAINER_TYPE = 34;
   BLOCK_NOT_COMMITTED = 35;
-  CONTAINER_UNHEALTHY = 36;
-  UNKNOWN_BCSID = 37;
-  BCSID_MISMATCH = 38;
+  UNKNOWN_BCSID = 36;
+  BCSID_MISMATCH = 37;
 }
 
 /**
@@ -164,8 +163,7 @@ enum ContainerLifeCycleState {
 OPEN = 1;
 CLOSING = 2;
 CLOSED = 3;
-UNHEALTHY = 4;
-INVALID = 5;
+INVALID = 4;
 }
 
 message ContainerCommandRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27bbbd0f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 1849841..bb5002a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -142,26 +142,6 @@ public class HddsDispatcher implements ContainerDispatcher 
{
 responseProto = handler.handle(msg, container);
 if (responseProto != null) {
   metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime);
-
-  // If the request is of Write Type and the container operation
-  // is unsuccessful, it implies the applyTransaction on the container
-  // failed. All subsequent transactions on the container should fail and
-  // hence replica will be marked unhealthy here. In this case, a close
-  // container action will be sent to SCM to close the container.
-  if (!HddsUtils.isReadOnly(msg)
-  && responseProto.getResult() != ContainerProtos.Result.SUCCESS) {
-// If the container is open and the container operation has failed,
-// it should be first marked unhealthy and the initiate the close
-// container action. This also implies this is the first transaction
-// which has failed, so the container is marked unhealthy right here.
-// Once container is marked unhealthy, all the subsequent write
-// transactions will fail with UNHEALTHY_CONTAINER exception.
-if (container.getContainerState() == ContainerLifeCycleState.OPEN) {
-  container.getContainerData()
-  .setState(ContainerLifeCycleState.UNHEALTHY);
-  sendCloseContainerActionIfNeeded(container);
-}
-  }
   return responseProto;
 } else {
   return ContainerUtils.unsupportedRequest(msg);
@@ -169,46 +149,31 @@ public class HddsDispatcher implements 
ContainerDispatcher {
   }
 
   /**
-   * If the container usage reaches the close threshold or the container is
-   * marked unhealthy we send Close ContainerAction to SCM.
+   * If the container usage reaches the cl
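
For reference, the behaviour reverted here (and retained on trunk) is: after
the first failed write transaction on an OPEN container the replica is marked
UNHEALTHY, so all later writes fail fast, and a close-container action is sent
to SCM. A condensed sketch of that dispatcher check using simplified stand-in
types rather than the real HddsDispatcher signatures:

public final class FailFastDispatcherSketch {

  enum State { OPEN, CLOSING, CLOSED, UNHEALTHY }
  enum Result { SUCCESS, FAILURE }

  interface Replica {
    State getState();
    void setState(State state);
  }

  // A failed write on an OPEN replica marks it UNHEALTHY and asks SCM to
  // close the container; read-only requests never change replica state.
  static void onRequestCompleted(Replica replica, Result result,
      boolean readOnly, Runnable sendCloseContainerAction) {
    if (!readOnly && result != Result.SUCCESS
        && replica.getState() == State.OPEN) {
      replica.setState(State.UNHEALTHY);
      sendCloseContainerAction.run();
    }
  }

  private FailFastDispatcherSketch() {
  }
}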

hadoop git commit: HDDS-716. Update ozone to latest ratis snapshot build(0.3.0-aa38160-SNAPSHOT). Contributed by Mukul Kumar Singh.

2018-10-24 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 553044818 -> 0891cdda7


HDDS-716. Update ozone to latest ratis snapshot build(0.3.0-aa38160-SNAPSHOT). 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0891cdda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0891cdda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0891cdda

Branch: refs/heads/trunk
Commit: 0891cdda7961f7d0d7debdb8e89b7816f39f7c7b
Parents: 5530448
Author: Shashikant Banerjee 
Authored: Wed Oct 24 16:04:57 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Oct 24 16:05:08 2018 +0530

--
 .../server/ratis/ContainerStateMachine.java | 72 +---
 hadoop-project/pom.xml  |  2 +-
 2 files changed, 48 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0891cdda/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index fa9fbf3..bcbf93f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -22,6 +22,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.server.RaftServer;
@@ -49,7 +50,7 @@ import org.apache.ratis.server.storage.RaftStorage;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
 import org.apache.ratis.proto.RaftProtos.LogEntryProto;
-import org.apache.ratis.proto.RaftProtos.SMLogEntryProto;
+import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
 import org.apache.ratis.statemachine.StateMachineStorage;
 import org.apache.ratis.statemachine.TransactionContext;
 import org.apache.ratis.statemachine.impl.BaseStateMachine;
@@ -207,7 +208,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 final ContainerCommandRequestProto proto =
 getRequestProto(request.getMessage().getContent());
 
-final SMLogEntryProto log;
+final StateMachineLogEntryProto log;
 if (proto.getCmdType() == Type.WriteChunk) {
   final WriteChunkRequestProto write = proto.getWriteChunk();
   // create the state machine data proto
@@ -237,23 +238,39 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   .setWriteChunk(commitWriteChunkProto)
   .build();
 
-  log = SMLogEntryProto.newBuilder()
-  .setData(commitContainerCommandProto.toByteString())
-  .setStateMachineData(dataContainerCommandProto.toByteString())
-  .build();
+  log = createSMLogEntryProto(request,
+  commitContainerCommandProto.toByteString(),
+  dataContainerCommandProto.toByteString());
 } else if (proto.getCmdType() == Type.CreateContainer) {
-  log = SMLogEntryProto.newBuilder()
-  .setData(request.getMessage().getContent())
-  .setStateMachineData(request.getMessage().getContent())
-  .build();
+  log = createSMLogEntryProto(request,
+  request.getMessage().getContent(), 
request.getMessage().getContent());
 } else {
-  log = SMLogEntryProto.newBuilder()
-  .setData(request.getMessage().getContent())
-  .build();
+  log = createSMLogEntryProto(request, request.getMessage().getContent(),
+  null);
 }
 return new TransactionContextImpl(this, request, log);
   }
 
+  private StateMachineLogEntryProto createSMLogEntryProto(RaftClientRequest r,
+  ByteString logData, ByteString smData) {
+StateMachineLogEntryProto.Builder builder =
+StateMachineLogEntryProto.newBuilder();
+
+builder.setCallId(r.getCallId())
+.setClientId(r.getClientId().toByteString())
+.setLogData(logData);
+
+if (smData != null) {
+  builder.setStateMachineEntry(StateMachineEntryProto.newBuil

hadoop git commit: HDDS-749. Restructure BlockId class in Ozone. Contributed by Shashikant Banerjee.

2018-10-30 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 486b9a4a7 -> 7757331db


HDDS-749. Restructure BlockId class in Ozone. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7757331d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7757331d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7757331d

Branch: refs/heads/trunk
Commit: 7757331dbc043694891a5242ac161adece9e8d6a
Parents: 486b9a4
Author: Shashikant Banerjee 
Authored: Tue Oct 30 14:15:27 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Oct 30 14:15:27 2018 +0530

--
 .../hdds/scm/storage/ChunkOutputStream.java | 17 ++--
 .../org/apache/hadoop/hdds/client/BlockID.java  | 85 ++--
 .../hadoop/hdds/client/ContainerBlockID.java| 79 ++
 .../common/helpers/AllocatedBlock.java  | 21 ++---
 ...kLocationProtocolClientSideTranslatorPB.java |  5 +-
 .../scm/storage/ContainerProtocolCalls.java |  7 +-
 .../apache/hadoop/ozone/common/BlockGroup.java  |  3 +-
 .../container/common/helpers/BlockData.java |  8 +-
 ...kLocationProtocolServerSideTranslatorPB.java |  2 +-
 .../main/proto/DatanodeContainerProtocol.proto  |  4 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |  2 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  7 +-
 .../container/keyvalue/KeyValueHandler.java |  5 +-
 .../container/keyvalue/helpers/BlockUtils.java  |  2 -
 .../keyvalue/impl/BlockManagerImpl.java |  6 +-
 .../keyvalue/interfaces/BlockManager.java   |  3 +-
 .../keyvalue/TestBlockManagerImpl.java  |  6 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  3 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  3 +-
 .../ozone/client/io/ChunkGroupOutputStream.java | 21 ++---
 .../ozone/om/helpers/OmKeyLocationInfo.java | 19 +
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 -
 .../container/TestContainerReplication.java |  2 +-
 .../common/impl/TestCloseContainerHandler.java  |  2 +-
 .../common/impl/TestContainerPersistence.java   | 14 ++--
 .../TestGetCommittedBlockLengthAndPutKey.java   | 16 ++--
 .../hadoop/ozone/web/client/TestKeys.java   |  2 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  7 +-
 .../ozone/om/ScmBlockLocationTestIngClient.java |  3 +-
 29 files changed, 227 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 4547163..4e881c4 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.storage;
 
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -57,7 +58,7 @@ import static 
org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
  */
 public class ChunkOutputStream extends OutputStream {
 
-  private final BlockID blockID;
+  private BlockID blockID;
   private final String key;
   private final String traceID;
   private final BlockData.Builder containerBlockData;
@@ -67,7 +68,6 @@ public class ChunkOutputStream extends OutputStream {
   private final String streamId;
   private int chunkIndex;
   private int chunkSize;
-  private long blockCommitSequenceId;
 
   /**
* Creates a new ChunkOutputStream.
@@ -96,15 +96,14 @@ public class ChunkOutputStream extends OutputStream {
 this.buffer = ByteBuffer.allocate(chunkSize);
 this.streamId = UUID.randomUUID().toString();
 this.chunkIndex = 0;
-blockCommitSequenceId = 0;
   }
 
   public ByteBuffer getBuffer() {
 return buffer;
   }
 
-  public long getBlockCommitSequenceId() {
-return blockCommitSequenceId;
+  public BlockID getBlockID() {
+return blockID;
   }
 
   @Override
@@ -165,8 +164,12 @@ public class ChunkOutputStream extends OutputStream {
   try {
 ContainerProtos.PutBlockResponseProto responseProto =
 putBlock(xceiverClient, containerBlockData.build(), traceID);
-blockCommitSequenceId =
-responseProto.getCommittedBlockLength().getBlockCommitSequenceId();
+BlockID responseBlockID = BlockID.getFromProto
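
The restructuring splits block identity into a container-scoped part plus the
commit sequence id, so ChunkOutputStream can simply replace its BlockID with
the one echoed back by putBlock (the getFromProto call above) instead of
tracking blockCommitSequenceId in a separate field. A simplified model of the
two classes for illustration only, not the committed implementation:

public class ContainerBlockIdSketch {

  // Identity of a block inside a container: container id + local id.
  static final class ContainerBlockID {
    final long containerID;
    final long localID;
    ContainerBlockID(long containerID, long localID) {
      this.containerID = containerID;
      this.localID = localID;
    }
  }

  // BlockID = ContainerBlockID plus the BCSID assigned when the block commits.
  static final class BlockID {
    final ContainerBlockID containerBlockID;
    final long blockCommitSequenceId;
    BlockID(ContainerBlockID containerBlockID, long blockCommitSequenceId) {
      this.containerBlockID = containerBlockID;
      this.blockCommitSequenceId = blockCommitSequenceId;
    }
  }

  public static void main(String[] args) {
    // After putBlock succeeds, the client swaps in the BlockID carrying the
    // BCSID returned by the datanode rather than keeping a separate counter.
    BlockID committed = new BlockID(new ContainerBlockID(1L, 1001L), 7L);
    System.out.println(committed.containerBlockID.containerID + "/"
        + committed.containerBlockID.localID + "@"
        + committed.blockCommitSequenceId);
  }
}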

hadoop git commit: HDDS-697. update and validate the BCSID for PutSmallFile/GetSmallFile command. Contributed by Shashikant Banerjee.

2018-10-31 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk c5eb237e3 -> b13c56742


HDDS-697. update and validate the BCSID for PutSmallFile/GetSmallFile command. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b13c5674
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b13c5674
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b13c5674

Branch: refs/heads/trunk
Commit: b13c56742a6fc0f6cb1ddd63e1afd51eb216e052
Parents: c5eb237
Author: Shashikant Banerjee 
Authored: Thu Nov 1 10:21:25 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 1 10:21:39 2018 +0530

--
 .../scm/storage/ContainerProtocolCalls.java |  8 ++-
 .../main/proto/DatanodeContainerProtocol.proto  |  2 +-
 .../server/ratis/ContainerStateMachine.java | 24 ++---
 .../container/keyvalue/KeyValueHandler.java |  2 -
 .../container/keyvalue/helpers/BlockUtils.java  |  2 +-
 .../keyvalue/helpers/SmallFileUtils.java|  7 +++
 .../rpc/TestContainerStateMachineFailures.java  | 21 +---
 .../ozone/scm/TestContainerSmallFile.java   | 51 
 8 files changed, 85 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b13c5674/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 150b1d6..c1d90a5 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -59,6 +59,8 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .WriteChunkRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+PutSmallFileResponseProto;
 import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;
@@ -231,10 +233,11 @@ public final class ContainerProtocolCalls  {
* @param blockID - ID of the block
* @param data - Data to be written into the container.
* @param traceID - Trace ID for logging purpose.
+   * @return container protocol writeSmallFile response
* @throws IOException
*/
-  public static void writeSmallFile(XceiverClientSpi client,
-  BlockID blockID, byte[] data, String traceID)
+  public static PutSmallFileResponseProto writeSmallFile(
+  XceiverClientSpi client, BlockID blockID, byte[] data, String traceID)
   throws IOException {
 
 BlockData containerBlockData =
@@ -268,6 +271,7 @@ public final class ContainerProtocolCalls  {
 .build();
 ContainerCommandResponseProto response = client.sendCommand(request);
 validateContainerResponse(response);
+return response.getPutSmallFile();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b13c5674/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 1700e23..df26f24 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -413,7 +413,7 @@ message PutSmallFileRequestProto {
 
 
 message PutSmallFileResponseProto {
-
+  required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
 }
 
 message GetSmallFileRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b13c5674/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index ac0833b..d5762bc 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ra
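
With writeSmallFile now returning the response, a caller can read the committed
block length and BCSID directly; the field names follow from the
PutSmallFileResponseProto and GetCommittedBlockLengthResponseProto definitions
earlier in this thread. A minimal sketch; extractBcsid is a hypothetical
helper, not part of the patch:

import java.io.IOException;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

public final class SmallFileWriter {

  // Writes a small file and returns the BCSID the datanode committed it
  // under, taken from the now-required committedBlockLength field.
  public static long extractBcsid(XceiverClientSpi client, BlockID blockID,
      byte[] data, String traceID) throws IOException {
    PutSmallFileResponseProto response =
        ContainerProtocolCalls.writeSmallFile(client, blockID, data, traceID);
    return response.getCommittedBlockLength().getBlockCommitSequenceId();
  }

  private SmallFileWriter() {
  }
}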

hadoop git commit: HDDS-771. ChunkGroupOutputStream stream entries need to be properly updated on closed container exception. Contributed by Lokesh Jain.

2018-11-01 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e8ac14dc -> e0ac3081e


HDDS-771. ChunkGroupOutputStream stream entries need to be properly updated on 
closed container exception. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0ac3081
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0ac3081
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0ac3081

Branch: refs/heads/trunk
Commit: e0ac3081e95bc70b13c36a2cad1565ecc35dec52
Parents: 2e8ac14
Author: Shashikant Banerjee 
Authored: Thu Nov 1 15:43:48 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 1 15:43:48 2018 +0530

--
 .../ozone/client/io/ChunkGroupOutputStream.java |  6 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 60 
 2 files changed, 65 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ac3081/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 78d69c1..3fe5d93 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -413,6 +413,11 @@ public class ChunkGroupOutputStream extends OutputStream {
   return;
 }
 
+// update currentStreamIndex in case of closed container exception. The
+// current stream entry cannot be used for further writes because
+// container is closed.
+currentStreamIndex += 1;
+
 // In case where not a single chunk of data has been written to the 
Datanode
 // yet. This block does not yet exist on the datanode but cached on the
 // outputStream buffer. No need to call GetCommittedBlockLength here
@@ -429,7 +434,6 @@ public class ChunkGroupOutputStream extends OutputStream {
   // allocate new block and write this data in the datanode. The cached
   // data in the buffer does not exceed chunkSize.
   Preconditions.checkState(buffer.position() < chunkSize);
-  currentStreamIndex += 1;
   // readjust the byteOffset value to the length actually been written.
   byteOffset -= buffer.position();
   handleWrite(buffer.array(), 0, buffer.position());
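
The fix moves the index increment out of the "nothing written yet" branch: once
a container is closed the current stream entry can never be reused, so the
index must advance whether or not buffered data remains to be rewritten to a
newly allocated block. A toy illustration of that invariant with simplified
types, not the real ChunkGroupOutputStream:

import java.util.ArrayList;
import java.util.List;

public class ClosedContainerRetrySketch {

  static final class StreamEntry {
    final String blockName;
    StreamEntry(String blockName) {
      this.blockName = blockName;
    }
  }

  private final List<StreamEntry> streamEntries = new ArrayList<>();
  private int currentStreamIndex;

  // On a closed-container exception the current entry is abandoned
  // unconditionally; buffered data, if any, is rewritten through a new entry.
  void handleClosedContainer(byte[] buffered, int bufferedLen) {
    currentStreamIndex += 1;
    if (bufferedLen > 0) {
      streamEntries.add(new StreamEntry("block-on-new-container"));
      // The real code re-issues handleWrite(buffered, 0, bufferedLen) here,
      // which allocates a new block on an open container.
    }
  }

  int getCurrentStreamIndex() {
    return currentStreamIndex;
  }
}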

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ac3081/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index d06a0bc..c6ee872 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -287,6 +289,64 @@ public class TestCloseContainerHandlingByClient {
 validateData(keyName, dataString.concat(dataString2).getBytes());
   }
 
+  @Test
+  public void testMultiBlockWrites3() throws Exception {
+
+String keyName = "standalone5";
+int keyLen = 4 * blockSize;
+OzoneOutputStream key =
+createKey(keyName, ReplicationType.RATIS, keyLen);
+ChunkGroupOutputStream groupOutputStream =
+(ChunkGroupOutputStream) key.getOutputStream();
+// With the initial size provided, it should have preallocated 4 blocks
+Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
+// write data 3 blocks and one more chunk
+byte[] writtenData = fixedLengthString(keyString, keyLen).getBytes();
+byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + 
chunkSize);
+Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
+key.write(data);
+
+A

hadoop git commit: HDDS-771. ChunkGroupOutputStream stream entries need to be properly updated on closed container exception. Contributed by Lokesh Jain.

2018-11-01 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 73e9e4348 -> 39d088bc2


HDDS-771. ChunkGroupOutputStream stream entries need to be properly updated on 
closed container exception. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39d088bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39d088bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39d088bc

Branch: refs/heads/ozone-0.3
Commit: 39d088bc2ca1d51658eb087e0b58d1cff3dd64a7
Parents: 73e9e43
Author: Shashikant Banerjee 
Authored: Thu Nov 1 15:50:15 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 1 15:50:15 2018 +0530

--
 .../ozone/client/io/ChunkGroupOutputStream.java |  6 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 60 
 2 files changed, 65 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d088bc/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index d9a9910..0772360 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -420,6 +420,11 @@ public class ChunkGroupOutputStream extends OutputStream {
   return;
 }
 
+// update currentStreamIndex in case of closed container exception. The
+// current stream entry cannot be used for further writes because
+// container is closed.
+currentStreamIndex += 1;
+
 // In case where not a single chunk of data has been written to the 
Datanode
 // yet. This block does not yet exist on the datanode but cached on the
 // outputStream buffer. No need to call GetCommittedBlockLength here
@@ -436,7 +441,6 @@ public class ChunkGroupOutputStream extends OutputStream {
   // allocate new block and write this data in the datanode. The cached
   // data in the buffer does not exceed chunkSize.
   Preconditions.checkState(buffer.position() < chunkSize);
-  currentStreamIndex += 1;
   // readjust the byteOffset value to the length actually been written.
   byteOffset -= buffer.position();
   handleWrite(buffer.array(), 0, buffer.position());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d088bc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index da8d334..408ff8b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -286,6 +288,64 @@ public class TestCloseContainerHandlingByClient {
 validateData(keyName, dataString.concat(dataString2).getBytes());
   }
 
+  @Test
+  public void testMultiBlockWrites3() throws Exception {
+
+String keyName = "standalone5";
+int keyLen = 4 * blockSize;
+OzoneOutputStream key =
+createKey(keyName, ReplicationType.RATIS, keyLen);
+ChunkGroupOutputStream groupOutputStream =
+(ChunkGroupOutputStream) key.getOutputStream();
+// With the initial size provided, it should have preallocated 4 blocks
+Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
+// write data 3 blocks and one more chunk
+byte[] writtenData = fixedLengthString(keyString, keyLen).getBytes();
+byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + 
chunkSize);
+Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
+key.write(data);
+
+A

hadoop git commit: HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in Datanode. Contributed by Mukul Kumar Singh.

2018-11-05 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk c8ca1747c -> 942693bdd


HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in 
Datanode. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/942693bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/942693bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/942693bd

Branch: refs/heads/trunk
Commit: 942693bddd5fba51b85a5f677e3496a41817cff3
Parents: c8ca174
Author: Shashikant Banerjee 
Authored: Mon Nov 5 23:43:22 2018 +0530
Committer: Shashikant Banerjee 
Committed: Mon Nov 5 23:43:22 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java | 11 +++---
 .../container/keyvalue/helpers/ChunkUtils.java  | 28 ---
 .../keyvalue/impl/ChunkManagerImpl.java |  2 +-
 .../keyvalue/interfaces/ChunkManager.java   |  3 +-
 .../keyvalue/TestChunkManagerImpl.java  | 37 ++--
 .../common/impl/TestContainerPersistence.java   | 28 ++-
 6 files changed, 62 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/942693bd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4cb23ed..1271d99 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -76,7 +77,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import static org.apache.hadoop.hdds.HddsConfigKeys
 .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
@@ -652,10 +653,10 @@ public class KeyValueHandler extends Handler {
   ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
   Preconditions.checkNotNull(chunkInfo);
 
-  byte[] data = null;
+  ByteBuffer data = null;
   if (request.getWriteChunk().getStage() == Stage.WRITE_DATA ||
   request.getWriteChunk().getStage() == Stage.COMBINED) {
-data = request.getWriteChunk().getData().toByteArray();
+data = request.getWriteChunk().getData().asReadOnlyByteBuffer();
   }
 
   chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data,
@@ -713,7 +714,7 @@ public class KeyValueHandler extends Handler {
   ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
   putSmallFileReq.getChunkInfo());
   Preconditions.checkNotNull(chunkInfo);
-  byte[] data = putSmallFileReq.getData().toByteArray();
+  ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer();
   // chunks will be committed as a part of handling putSmallFile
   // here. There is no need to maintain this info in openContainerBlockMap.
   chunkManager.writeChunk(
@@ -724,7 +725,7 @@ public class KeyValueHandler extends Handler {
   blockData.setChunks(chunks);
   // TODO: add bcsId as a part of putSmallFile transaction
   blockManager.putBlock(kvContainer, blockData);
-  metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
+  metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity());
 
 } catch (StorageContainerException ex) {
   return ContainerUtils.logAndReturnError(LOG, ex, request);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/942693bd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 20598d9..718f5de 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-h
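
The saving comes from the protobuf ByteString API itself: toByteArray() copies
the chunk payload into a fresh array, while asReadOnlyByteBuffer() wraps the
existing bytes in a read-only view. A small standalone demonstration using
plain com.google.protobuf; the datanode code above uses the ratis-thirdparty
relocation of the same class:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import com.google.protobuf.ByteString;

public class ByteStringCopyDemo {

  public static void main(String[] args) {
    ByteString payload =
        ByteString.copyFrom("chunk-data".getBytes(StandardCharsets.UTF_8));

    byte[] copied = payload.toByteArray();            // allocates and copies
    ByteBuffer view = payload.asReadOnlyByteBuffer(); // no copy, read-only view

    System.out.println("copied length  = " + copied.length);
    System.out.println("view remaining = " + view.remaining());
    System.out.println("view read-only = " + view.isReadOnly());
  }
}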

hadoop git commit: HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in Datanode. Contributed by Mukul Kumar Singh.

2018-11-05 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 53d4aefae -> 4b0004488


HDDS-799. Avoid ByteString to byte array conversion cost by using ByteBuffer in 
Datanode. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b000448
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b000448
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b000448

Branch: refs/heads/ozone-0.3
Commit: 4b00044883f5d00eea99ee885fff0761d9b6392e
Parents: 53d4aef
Author: Shashikant Banerjee 
Authored: Tue Nov 6 00:00:23 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 6 00:00:23 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java | 11 +++---
 .../container/keyvalue/helpers/ChunkUtils.java  | 30 
 .../keyvalue/impl/ChunkManagerImpl.java |  2 +-
 .../keyvalue/interfaces/ChunkManager.java   |  3 +-
 .../keyvalue/TestChunkManagerImpl.java  | 37 ++--
 .../common/impl/TestContainerPersistence.java   | 28 ++-
 6 files changed, 63 insertions(+), 48 deletions(-)
--
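
For context on the change below: ByteString.toByteArray() materialises a fresh
byte[] copy of the whole chunk payload on every write, while
ByteString.asReadOnlyByteBuffer() wraps the existing backing data in a
read-only java.nio.ByteBuffer. A minimal sketch of the difference, using the
shaded Ratis protobuf ByteString imported in the diff (the class and method
names here are illustrative, not the handler's actual code):

  import java.nio.ByteBuffer;
  import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

  public final class ChunkPayloadSketch {
    // Copies every byte of the payload into a new array (the cost avoided here).
    static byte[] copyOut(ByteString payload) {
      return payload.toByteArray();
    }

    // Wraps the same backing data in a read-only ByteBuffer; no payload copy.
    static ByteBuffer view(ByteString payload) {
      return payload.asReadOnlyByteBuffer();
    }
  }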


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b000448/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 7c859d4..2377cd6 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -76,7 +77,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import static org.apache.hadoop.hdds.HddsConfigKeys
 .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -668,10 +669,10 @@ public class KeyValueHandler extends Handler {
   ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
   Preconditions.checkNotNull(chunkInfo);
 
-  byte[] data = null;
+  ByteBuffer data = null;
   if (request.getWriteChunk().getStage() == Stage.WRITE_DATA ||
   request.getWriteChunk().getStage() == Stage.COMBINED) {
-data = request.getWriteChunk().getData().toByteArray();
+data = request.getWriteChunk().getData().asReadOnlyByteBuffer();
   }
 
   chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data,
@@ -729,7 +730,7 @@ public class KeyValueHandler extends Handler {
   ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
   putSmallFileReq.getChunkInfo());
   Preconditions.checkNotNull(chunkInfo);
-  byte[] data = putSmallFileReq.getData().toByteArray();
+  ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer();
   // chunks will be committed as a part of handling putSmallFile
   // here. There is no need to maintain this info in openContainerBlockMap.
   chunkManager.writeChunk(
@@ -740,7 +741,7 @@ public class KeyValueHandler extends Handler {
   blockData.setChunks(chunks);
   // TODO: add bcsId as a part of putSmallFile transaction
   blockManager.putBlock(kvContainer, blockData);
-  metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
+  metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity());
 
 } catch (StorageContainerException ex) {
   return ContainerUtils.logAndReturnError(LOG, ex, request);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b000448/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 492a286..dc44dc5 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-h

hadoop git commit: HDDS-794. Addendum patch to fix compilation failure. Contributed by Shashikant Banerjee.

2018-11-05 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ddefdd50 -> 50f40e053


HDDS-794. Addendum patch to fix compilation failure. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50f40e05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50f40e05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50f40e05

Branch: refs/heads/trunk
Commit: 50f40e0536f38517aa33e8859f299bcf19f2f319
Parents: 5ddefdd
Author: Shashikant Banerjee 
Authored: Tue Nov 6 00:20:57 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 6 00:20:57 2018 +0530

--
 .../apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50f40e05/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 8f9d589..dc44dc5 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -139,7 +139,7 @@ public final class ChunkUtils {
   }
 }
 log.debug("Write Chunk completed for chunkFile: {}, size {}", chunkFile,
-data.length);
+bufferSize);
   }
 
   /**





hadoop git commit: HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz Wo Nicholas Sze and Mukul Kumar Singh.

2018-11-08 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8d99648c2 -> 31614bcc7


HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz 
Wo Nicholas Sze and Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31614bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31614bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31614bcc

Branch: refs/heads/trunk
Commit: 31614bcc7cda614c45769aa779a839b25c375db2
Parents: 8d99648
Author: Shashikant Banerjee 
Authored: Fri Nov 9 00:05:45 2018 +0530
Committer: Shashikant Banerjee 
Committed: Fri Nov 9 00:05:45 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  8 
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 10 
 .../common/src/main/resources/ozone-default.xml | 15 ++
 .../server/ratis/ContainerStateMachine.java | 48 +---
 .../server/ratis/XceiverServerRatis.java| 11 +
 hadoop-hdds/pom.xml |  2 +-
 hadoop-ozone/pom.xml|  2 +-
 7 files changed, 67 insertions(+), 29 deletions(-)
--
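
The two keys introduced below are plain integers read through the normal
Hadoop configuration API. A short sketch of the reading side, assuming an
OzoneConfiguration is at hand (the constants are the ones added in this
change; the class and method wrapping them are illustrative):

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.ozone.OzoneConfigKeys;

  public final class RatisTuningSketch {
    static void printRatisTuning(OzoneConfiguration conf) {
      // Default is -1; see the ozone-default.xml description below.
      int syncRetries = conf.getInt(
          OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
          OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT);
      // Size of the Ratis log queue on the datanode; the default is 128.
      int logQueueSize = conf.getInt(
          OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE,
          OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT);
      System.out.println("sync retries=" + syncRetries
          + ", log queue size=" + logQueueSize);
    }
  }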


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 38eec61..cedcc43 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -85,6 +85,14 @@ public final class ScmConfigKeys {
   public static final TimeDuration
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
   TimeDuration.valueOf(10, TimeUnit.SECONDS);
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  "dfs.container.ratis.statemachinedata.sync.retries";
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  "dfs.container.ratis.log.queue.size";
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 54b1cf8..9776817 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -261,6 +261,16 @@ public final class OzoneConfigKeys {
   public static final TimeDuration
   DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
   ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE;
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT;
   public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
   ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5ff60eb..2ffc2ab 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -60,6 +60,21 @@
 
   
   
+dfs.container.ratis.statemachinedata.sync.retries
+-1
+OZONE, DEBUG, CONTAINER, RATIS
+Number of times the WriteStateMachineDa

hadoop git commit: HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz Wo Nicholas Sze and Mukul Kumar Singh.

2018-11-08 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 aed5046f8 -> 788cf061c


HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz 
Wo Nicholas Sze and Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/788cf061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/788cf061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/788cf061

Branch: refs/heads/ozone-0.3
Commit: 788cf061cd3c578ce69474e913eb27a9e059a750
Parents: aed5046
Author: Shashikant Banerjee 
Authored: Fri Nov 9 00:05:45 2018 +0530
Committer: Shashikant Banerjee 
Committed: Fri Nov 9 00:59:48 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  8 
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 10 
 .../common/src/main/resources/ozone-default.xml | 15 ++
 .../server/ratis/ContainerStateMachine.java | 48 +---
 .../server/ratis/XceiverServerRatis.java| 11 +
 hadoop-project/pom.xml  |  2 +-
 6 files changed, 66 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/788cf061/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 11e6a23..f2cebe9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -80,6 +80,14 @@ public final class ScmConfigKeys {
   public static final TimeDuration
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
   TimeDuration.valueOf(10, TimeUnit.SECONDS);
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  "dfs.container.ratis.statemachinedata.sync.retries";
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  "dfs.container.ratis.log.queue.size";
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/788cf061/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 5e9fe08..49e408f 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -264,6 +264,16 @@ public final class OzoneConfigKeys {
   public static final TimeDuration
   DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
   ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE;
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT;
   public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
   ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/788cf061/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 2e250fa..512b3ee 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -60,6 +60,21 @@
 
   
   
+dfs.container.ratis.statemachinedata.sync.retries
+-1
+OZONE, DEBUG, CONTAINER, RATIS
+Number of times the WriteStateMachineData op will be tried
+  before failing, if th

[1/2] hadoop git commit: HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-13 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 75291e6d5 -> 671fd6524


http://git-wip-us.apache.org/repos/asf/hadoop/blob/671fd652/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 43517ae..935423d 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
-import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -27,11 +26,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -55,15 +49,17 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import java.io.IOException;
-import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.TimeUnit;
+
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
@@ -79,7 +75,6 @@ public class TestCloseContainerHandlingByClient {
   private static String volumeName;
   private static String bucketName;
   private static String keyString;
-  private static int maxRetries;
 
   /**
* Create a MiniDFSCluster for testing.
@@ -91,15 +86,14 @@ public class TestCloseContainerHandlingByClient {
   @BeforeClass
   public static void init() throws Exception {
 conf = new OzoneConfiguration();
-maxRetries = 100;
-conf.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, maxRetries);
-conf.set(OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL, "200ms");
 chunkSize = (int) OzoneConsts.MB;
 blockSize = 4 * chunkSize;
-conf.setInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
+conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
+conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, 
TimeUnit.MILLISECONDS);
+conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+conf.setQuietMode(false);
 conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, (4));
-cluster = MiniOzoneCluster.newBuilder(conf)
-.setNumDatanodes(3).build();
+cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7).build();
 cluster.waitForClusterToBeReady();
 //the easiest way to create an open container is creating a key
 client = OzoneClientFactory.getClient(conf);
@@ -121,44 +115,29 @@ public class TestCloseContainerHandlingByClient {
 }
   }
 
-  private static String fixedLengthString(String string, int length) {
-return String.format("%1$" + length + "s", string);
-  }
-
   @Test
   public void testBlockWritesWithFlushAndClose() throws Exception {
 String keyName = "standalone";
-OzoneOutputStream key =
-createKey(keyName, ReplicationType.STAND_ALONE, 0);
+OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
 // write data more than 1 chunk
-byte[] data =
-fixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+byte[] data = ContainerTestHelper
+.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
 key.write(data);
 
 Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
 //get the name of a valid container
 OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-.setBucketName(bucketName)
-.setType(HddsProtos.ReplicationType.STAND_ALONE)
+.setBucke

[2/2] hadoop git commit: HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-13 Thread shashikant
HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/671fd652
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/671fd652
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/671fd652

Branch: refs/heads/trunk
Commit: 671fd6524b2640474de2bc3b8dbaa0a3cf7fcf01
Parents: 75291e6
Author: Shashikant Banerjee 
Authored: Tue Nov 13 23:39:14 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 13 23:39:14 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  28 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |  65 ++-
 .../hdds/scm/storage/ChunkOutputStream.java | 448 +++
 .../hdds/scm/XceiverClientAsyncReply.java   |  98 
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  12 +-
 .../scm/storage/ContainerProtocolCalls.java |  57 ++-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  24 +-
 .../common/src/main/resources/ozone-default.xml |  26 +-
 .../keyvalue/impl/BlockManagerImpl.java |   3 +
 .../hadoop/ozone/client/OzoneClientUtils.java   |  27 --
 .../ozone/client/io/ChunkGroupOutputStream.java | 337 +++---
 .../hadoop/ozone/client/rpc/RpcClient.java  |  27 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  45 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  19 +
 .../apache/hadoop/ozone/RatisTestHelper.java|   2 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 252 +++
 .../rpc/TestContainerStateMachineFailures.java  |  20 +-
 .../client/rpc/TestFailureHandlingByClient.java | 213 +
 .../ozone/container/ContainerTestHelper.java|  34 ++
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../ozone/scm/TestXceiverClientMetrics.java |   3 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java |   2 +-
 .../web/storage/DistributedStorageHandler.java  |  42 +-
 .../hadoop/ozone/freon/TestDataValidate.java|   6 +
 .../ozone/freon/TestRandomKeyGenerator.java |   6 +
 25 files changed, 1248 insertions(+), 550 deletions(-)
--
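
One client-facing change visible in the XceiverClientGrpc diff below:
sendCommandAsync now returns an XceiverClientAsyncReply that wraps the
response future, instead of returning the CompletableFuture directly. A
minimal caller-side sketch of the new shape (class and method names are
illustrative; exception handling is collapsed into the throws clause):

  import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
  import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
  import org.apache.hadoop.hdds.scm.XceiverClientAsyncReply;
  import org.apache.hadoop.hdds.scm.XceiverClientSpi;

  public final class AsyncReplySketch {
    static ContainerCommandResponseProto sendAndWait(XceiverClientSpi client,
        ContainerCommandRequestProto request) throws Exception {
      // The reply wrapper carries the response future; blocking on it yields
      // the protobuf response as before.
      XceiverClientAsyncReply reply = client.sendCommandAsync(request);
      return reply.getResponse().get();
    }
  }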


http://git-wip-us.apache.org/repos/asf/hadoop/blob/671fd652/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index cc34e27..9acd832 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.scm;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -47,6 +48,7 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 /**
  * A Client for the storageContainer protocol.
@@ -163,7 +165,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 // In case the command gets retried on a 2nd datanode,
 // sendCommandAsyncCall will create a new channel and async stub
 // in case these don't exist for the specific datanode.
-responseProto = sendCommandAsync(request, dn).get();
+responseProto = sendCommandAsync(request, dn).getResponse().get();
 if (responseProto.getResult() == ContainerProtos.Result.SUCCESS) {
   break;
 }
@@ -197,13 +199,23 @@ public class XceiverClientGrpc extends XceiverClientSpi {
* @throws IOException
*/
   @Override
-  public CompletableFuture sendCommandAsync(
+  public XceiverClientAsyncReply sendCommandAsync(
   ContainerCommandRequestProto request)
   throws IOException, ExecutionException, InterruptedException {
-return sendCommandAsync(request, pipeline.getFirstNode());
+XceiverClientAsyncReply asyncReply =
+sendCommandAsync(request, pipeline.getFirstNode());
+
+// TODO : for now make this API sync in nature as async requests are
+// served out of order over XceiverClientGrpc. This needs to be fixed
+// if this API is to be used for I/O path. Currently, this is not
+// used for Read/Write Operation but for tests.
+if (!HddsUtils.isReadOnly(re

hadoop git commit: HDDS-834. Datanode goes OOM because of segment size. Contributed by Mukul Kumar Singh.

2018-11-14 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 a2fa8324d -> 3923a4a27


HDDS-834. Datanode goes OOM because of segment size. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3923a4a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3923a4a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3923a4a2

Branch: refs/heads/ozone-0.3
Commit: 3923a4a279565108ac4ad341c031173aa967c603
Parents: a2fa832
Author: Shashikant Banerjee 
Authored: Wed Nov 14 15:50:46 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 15:50:46 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  2 +-
 .../common/src/main/resources/ozone-default.xml |  4 +--
 .../server/ratis/ContainerStateMachine.java | 30 ++--
 3 files changed, 25 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3923a4a2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index f2cebe9..38c41ba 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -69,7 +69,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
   public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  1 * 1024 * 1024 * 1024;
+  16 * 1024;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
   public static final int

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3923a4a2/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 512b3ee..4a72d39 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -175,10 +175,10 @@
   
   
 dfs.container.ratis.segment.size
-1073741824
+16384
 OZONE, RATIS, PERFORMANCE
 The size of the raft segment used by Apache Ratis on 
datanodes.
-  (1 GB by default)
+  (16 KB by default)
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3923a4a2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 270e164..d2d2209 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -119,7 +119,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   private final ConcurrentHashMap createContainerResponseMap;
   private ExecutorService[] executors;
   private final int numExecutors;
-  private final Map containerCommandCompletionMap;
+  private final Map applyTransactionCompletionMap;
+  private long lastIndex;
   /**
* CSM metrics.
*/
@@ -137,7 +138,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 this.executors = executors.toArray(new ExecutorService[numExecutors]);
 this.writeChunkFutureMap = new ConcurrentHashMap<>();
 this.createContainerResponseMap = new ConcurrentHashMap<>();
-containerCommandCompletionMap = new ConcurrentHashMap<>();
+applyTransactionCompletionMap = new ConcurrentHashMap<>();
+this.lastIndex = RaftServerConstants.INVALID_LOG_INDEX;
   }
 
   @Override
@@ -161,10 +163,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 
   private long loadSnapshot(SingleFileSnapshotInfo snapshot) {
 if (snapshot == null) {
-  TermIndex empty = TermIndex.newTermIndex(0, 0);
+  TermIndex empty = TermIndex.newTermIndex(0,
+  RaftServerConstants.INVALID_LOG_INDEX);
   LOG.info("The sn

hadoop git commit: HDDS-834. Datanode goes OOM because of segment size. Contributed by Mukul Kumar Singh.

2018-11-14 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fade865c -> a94828170


HDDS-834. Datanode goes OOM because of segment size. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9482817
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9482817
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9482817

Branch: refs/heads/trunk
Commit: a94828170684793b80efdd76dc8a3167e324c0ea
Parents: 3fade86
Author: Shashikant Banerjee 
Authored: Wed Nov 14 15:53:22 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 15:53:22 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  2 +-
 .../common/src/main/resources/ozone-default.xml |  4 +--
 .../server/ratis/ContainerStateMachine.java | 27 +++-
 3 files changed, 24 insertions(+), 9 deletions(-)
--
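
The fix below is a default change: the Ratis raft log segment size on the
datanode drops from 1 GB to 16 KB, the 1 GB default being what the OOM in the
subject line is attributed to. A deployment can also pin the key explicitly;
a small sketch using the constant from ScmConfigKeys in this diff (the
wrapping class is illustrative):

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.scm.ScmConfigKeys;

  public final class SegmentSizeSketch {
    static OzoneConfiguration withSmallSegments() {
      OzoneConfiguration conf = new OzoneConfiguration();
      // Matches the new default below: 16 KB raft log segments.
      conf.setInt(ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, 16 * 1024);
      return conf;
    }
  }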


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index cedcc43..b748d69 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -74,7 +74,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
   public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  1 * 1024 * 1024 * 1024;
+  16 * 1024;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
   public static final int

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 54bffd5..e94e7e1 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -175,10 +175,10 @@
   
   
 dfs.container.ratis.segment.size
-1073741824
+16384
 OZONE, RATIS, PERFORMANCE
 The size of the raft segment used by Apache Ratis on 
datanodes.
-  (1 GB by default)
+  (16 KB by default)
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 3899bde..a3b496a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -120,7 +120,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   createContainerFutureMap;
   private ExecutorService[] executors;
   private final int numExecutors;
-  private final Map containerCommandCompletionMap;
+  private final Map applyTransactionCompletionMap;
+  private long lastIndex;
   /**
* CSM metrics.
*/
@@ -138,7 +139,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 this.executors = executors.toArray(new ExecutorService[numExecutors]);
 this.writeChunkFutureMap = new ConcurrentHashMap<>();
 this.createContainerFutureMap = new ConcurrentHashMap<>();
-containerCommandCompletionMap = new ConcurrentHashMap<>();
+applyTransactionCompletionMap = new ConcurrentHashMap<>();
+this.lastIndex = RaftServerConstants.INVALID_LOG_INDEX;
   }
 
   @Override
@@ -162,10 +164,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 
   private long loadSnapshot(SingleFileSnapshotInfo snapshot) {
 if (snapshot == null) {
-  TermIndex empty = TermIndex.newTermIndex(0, 0);
+  TermIndex empty = TermIndex.newTermIndex(0,
+  RaftServerConstants.INVALID_LOG_INDEX);
   LOG.info("The snapshot info is null." 

hadoop git commit: HDDS-774. Remove OpenContainerBlockMap from datanode. Contributed by Shashikant Banerjee.

2018-11-14 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk a94828170 -> b57cc73f8


HDDS-774. Remove OpenContainerBlockMap from datanode. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b57cc73f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b57cc73f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b57cc73f

Branch: refs/heads/trunk
Commit: b57cc73f837ecb79ed275fc6e50ffce684baf573
Parents: a948281
Author: Shashikant Banerjee 
Authored: Wed Nov 14 20:05:56 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 20:05:56 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java |  54 +---
 .../common/impl/TestCloseContainerHandler.java  | 261 ---
 .../TestGetCommittedBlockLengthAndPutKey.java   |  71 -
 3 files changed, 2 insertions(+), 384 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57cc73f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index d8c23bf..f970c72 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -54,7 +54,6 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.OpenContainerBlockMap;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import 
org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
@@ -109,7 +108,6 @@ public class KeyValueHandler extends Handler {
   private final VolumeChoosingPolicy volumeChoosingPolicy;
   private final long maxContainerSize;
   private final AutoCloseableLock handlerLock;
-  private final OpenContainerBlockMap openContainerBlockMap;
 
   public KeyValueHandler(Configuration config, ContainerSet contSet,
   VolumeSet volSet, ContainerMetrics metrics) {
@@ -138,21 +136,12 @@ public class KeyValueHandler extends Handler {
 // this handler lock is used for synchronizing createContainer Requests,
 // so using a fair lock here.
 handlerLock = new AutoCloseableLock(new ReentrantLock(true));
-openContainerBlockMap = new OpenContainerBlockMap();
   }
 
   @VisibleForTesting
   public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() {
 return volumeChoosingPolicy;
   }
-  /**
-   * Returns OpenContainerBlockMap instance.
-   *
-   * @return OpenContainerBlockMap
-   */
-  public OpenContainerBlockMap getOpenContainerBlockMap() {
-return openContainerBlockMap;
-  }
 
   @Override
   public ContainerCommandResponseProto handle(
@@ -355,7 +344,6 @@ public class KeyValueHandler extends Handler {
   } else {
 long containerId = kvContainer.getContainerData().getContainerID();
 containerSet.removeContainer(containerId);
-openContainerBlockMap.removeContainer(containerId);
 // Release the lock first.
 // Avoid holding write locks for disk operations
 kvContainer.writeUnlock();
@@ -388,19 +376,11 @@ public class KeyValueHandler extends Handler {
 long containerID = kvContainer.getContainerData().getContainerID();
 try {
   checkContainerOpen(kvContainer);
-  KeyValueContainerData kvData = kvContainer.getContainerData();
-
-  // remove the container from open block map once, all the blocks
-  // have been committed and the container is closed
-  commitPendingBlocks(kvContainer);
-
   // TODO : The close command should move the container to either quasi
   // closed/closed depending upon how the closeContainer gets executed.
   // If it arrives by Standalone, it will be moved to Quasi Closed or
   // otherwise moved to Closed state if it gets executed via Ratis.
   kvContainer.close();
-  // make sure the the container open keys from BlockMap gets removed
-  openContainerBlockMap.removeContainer(kvData.getContainerID());
 } catch (StorageContainerException ex) {
   if (ex.getResult() == CLOSED_CONTAINER_IO) {
 LOG.debug("Container {} is already closed."

hadoop git commit: HDDS-845. Create a new raftClient instance for every watch request for Ratis. Contributed by Shashikant Banerjee.

2018-11-19 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk cfb915f3d -> 10cf5773b


HDDS-845. Create a new raftClient instance for every watch request for Ratis. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10cf5773
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10cf5773
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10cf5773

Branch: refs/heads/trunk
Commit: 10cf5773ba32566dd76730e32a3ccdf2b3bd4d09
Parents: cfb915f
Author: Shashikant Banerjee 
Authored: Mon Nov 19 14:38:51 2018 +0530
Committer: Shashikant Banerjee 
Committed: Mon Nov 19 14:38:51 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  3 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java | 38 +++-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  3 +-
 3 files changed, 33 insertions(+), 11 deletions(-)
--
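
The pattern adopted below is to build a dedicated RaftClient for each watch
request and close it once the watch completes, rather than reusing the shared
client. A condensed sketch of that lifecycle (all names are taken from the
diff below; rpcType, getPipeline() and retryPolicy belong to the surrounding
XceiverClientRatis class, and the fallback watch on MAJORITY_COMMITTED that
the real code performs on timeout is omitted here):

  // Sketch only: imports and fields follow the surrounding class.
  void watchOnce(long index, long timeoutMillis) throws Exception {
    // A fresh client per watch keeps a slow watch request from blocking later
    // requests behind the shared client's sliding window (see the TODO below).
    RaftClient watchClient =
        RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy);
    try {
      watchClient.sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED)
          .get(timeoutMillis, TimeUnit.MILLISECONDS);
    } finally {
      watchClient.close();
    }
  }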


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10cf5773/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index c6b19ab..5592c1d 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -290,7 +290,8 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
   @Override
   public void watchForCommit(long index, long timeout)
-  throws InterruptedException, ExecutionException, TimeoutException {
+  throws InterruptedException, ExecutionException, TimeoutException,
+  IOException {
 // there is no notion of watch for commit index in standalone pipeline
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10cf5773/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 6b3b001..b238a09 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -122,11 +122,15 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   public void close() {
 final RaftClient c = client.getAndSet(null);
 if (c != null) {
-  try {
-c.close();
-  } catch (IOException e) {
-throw new IllegalStateException(e);
-  }
+  closeRaftClient(c);
+}
+  }
+
+  private void closeRaftClient(RaftClient raftClient) {
+try {
+  raftClient.close();
+} catch (IOException e) {
+  throw new IllegalStateException(e);
 }
   }
 
@@ -145,19 +149,35 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 
   @Override
   public void watchForCommit(long index, long timeout)
-  throws InterruptedException, ExecutionException, TimeoutException {
-// TODO: Create a new Raft client instance to watch
-CompletableFuture replyFuture = getClient()
+  throws InterruptedException, ExecutionException, TimeoutException,
+  IOException {
+LOG.debug("commit index : {} watch timeout : {}", index, timeout);
+// create a new RaftClient instance for watch request
+RaftClient raftClient =
+RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy);
+CompletableFuture replyFuture = raftClient
 .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED);
 try {
   replyFuture.get(timeout, TimeUnit.MILLISECONDS);
 } catch (TimeoutException toe) {
   LOG.warn("3 way commit failed ", toe);
-  getClient()
+
+  closeRaftClient(raftClient);
+  // generate a new raft client instance again so that next watch request
+  // does not get blocked for the previous one
+
+  // TODO : need to remove the code to create the new RaftClient instance
+  // here once the watch request bypassing sliding window in Raft Client
+  // gets fixed.
+  raftClient =
+  RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy);
+  raftClient
   .sendWatchAsync(index, 
RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
   .get(timeout, TimeUnit.MILLISECONDS);
   LOG.info("Could not commit " + index + " to all the nodes."
   + "Committed by majority.");
+} finally {
+  closeRaftC

hadoop git commit: HDDS-835. Use storageSize instead of Long for buffer size configs in Ozone Client. Contributed by Shashikant Banerjee.

2018-11-20 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 10b5da85f -> c74783036


HDDS-835. Use storageSize instead of Long for buffer size configs in Ozone 
Client. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7478303
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7478303
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7478303

Branch: refs/heads/trunk
Commit: c74783036d8a2a1d2ce071a888d14e0ccff9263e
Parents: 10b5da8
Author: Shashikant Banerjee 
Authored: Tue Nov 20 23:01:02 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 20 23:01:02 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  3 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  9 +++---
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 21 ++---
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 
 .../main/java/org/apache/ratis/RatisHelper.java |  3 +-
 .../common/src/main/resources/ozone-default.xml |  6 ++--
 .../transport/server/XceiverServerGrpc.java |  5 ++--
 .../server/ratis/XceiverServerRatis.java| 14 +
 .../replication/GrpcReplicationClient.java  |  4 +--
 .../ozone/client/io/ChunkGroupOutputStream.java |  7 ++---
 .../hadoop/ozone/client/rpc/RpcClient.java  | 31 +++-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  | 17 ++-
 .../rpc/TestCloseContainerHandlingByClient.java |  4 ++-
 .../client/rpc/TestFailureHandlingByClient.java |  5 
 .../om/TestMultipleContainerReadWrite.java  |  5 ++--
 .../web/storage/DistributedStorageHandler.java  | 29 ++
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  9 +++---
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  7 +++--
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  6 ++--
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |  4 ++-
 20 files changed, 110 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7478303/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 5592c1d..a824c29 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
@@ -103,7 +104,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 LOG.debug("Connecting to server Port : " + dn.getIpAddress());
 ManagedChannel channel =
 NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext()
-
.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
+.maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
 .build();
 XceiverClientProtocolServiceStub asyncStub =
 XceiverClientProtocolServiceGrpc.newStub(channel);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7478303/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 896caed..f04d12f 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -73,12 +73,12 @@ public final class ScmConfigKeys {
   = 10;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  16 * 1024;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
+  "16KB";
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
-  public static final int
-  DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 

hadoop git commit: Revert "HDDS-835. Use storageSize instead of Long for buffer size configs in Ozone Client. Contributed by Shashikant Banerjee."

2018-11-20 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 49824ed26 -> 1734ace35


Revert "HDDS-835. Use storageSize instead of Long for buffer size configs in 
Ozone Client. Contributed by Shashikant Banerjee."

This reverts commit c74783036d8a2a1d2ce071a888d14e0ccff9263e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1734ace3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1734ace3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1734ace3

Branch: refs/heads/trunk
Commit: 1734ace35f1c92ff37ccf7f8545b4d74ecbc1cca
Parents: 49824ed
Author: Shashikant Banerjee 
Authored: Wed Nov 21 01:07:38 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 21 01:07:38 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  3 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  9 +++---
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 21 +++--
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 
 .../main/java/org/apache/ratis/RatisHelper.java |  3 +-
 .../common/src/main/resources/ozone-default.xml |  6 ++--
 .../transport/server/XceiverServerGrpc.java |  5 ++--
 .../server/ratis/XceiverServerRatis.java| 14 -
 .../replication/GrpcReplicationClient.java  |  4 +--
 .../ozone/client/io/ChunkGroupOutputStream.java |  7 +++--
 .../hadoop/ozone/client/rpc/RpcClient.java  | 31 +---
 .../hadoop/ozone/MiniOzoneClusterImpl.java  | 17 +--
 .../rpc/TestCloseContainerHandlingByClient.java |  4 +--
 .../client/rpc/TestFailureHandlingByClient.java |  5 
 .../om/TestMultipleContainerReadWrite.java  |  5 ++--
 .../web/storage/DistributedStorageHandler.java  | 29 --
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  9 +++---
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  7 ++---
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  6 ++--
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |  4 +--
 20 files changed, 84 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1734ace3/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index a824c29..5592c1d 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
@@ -104,7 +103,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 LOG.debug("Connecting to server Port : " + dn.getIpAddress());
 ManagedChannel channel =
 NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext()
-.maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
+
.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
 .build();
 XceiverClientProtocolServiceStub asyncStub =
 XceiverClientProtocolServiceGrpc.newStub(channel);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1734ace3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index f04d12f..896caed 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -73,12 +73,12 @@ public final class ScmConfigKeys {
   = 10;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  "16KB";
+  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
+  16 * 1024;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
-  public static final String
-  DFS_CONTA

hadoop git commit: HDDS-835. Use storageSize instead of Long for buffer size configs in Ozone Client. Contributed by Shashikant Banerjee.

2018-11-20 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk f994b526a -> 14e1a0a3d


HDDS-835. Use storageSize instead of Long for buffer size configs in Ozone 
Client. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14e1a0a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14e1a0a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14e1a0a3

Branch: refs/heads/trunk
Commit: 14e1a0a3d6cf0566ba696a73699aa7ce6ed1f94f
Parents: f994b52
Author: Shashikant Banerjee 
Authored: Wed Nov 21 10:51:50 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 21 10:51:50 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  3 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 11 ---
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 21 ++---
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 
 .../main/java/org/apache/ratis/RatisHelper.java |  3 +-
 .../common/src/main/resources/ozone-default.xml | 16 +-
 .../transport/server/XceiverServerGrpc.java |  5 ++--
 .../server/ratis/XceiverServerRatis.java| 14 +
 .../replication/GrpcReplicationClient.java  |  4 +--
 .../ozone/client/io/ChunkGroupOutputStream.java |  7 ++---
 .../hadoop/ozone/client/rpc/RpcClient.java  | 31 +++-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  | 17 ++-
 .../rpc/TestCloseContainerHandlingByClient.java |  4 ++-
 .../client/rpc/TestFailureHandlingByClient.java |  5 
 .../om/TestMultipleContainerReadWrite.java  |  5 ++--
 .../web/storage/DistributedStorageHandler.java  | 29 ++
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  9 +++---
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  7 +++--
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  6 ++--
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |  4 ++-
 20 files changed, 116 insertions(+), 90 deletions(-)
--
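
"storageSize" here refers to Hadoop's size-suffixed configuration values
("16KB", "128MB", ...) read through Configuration.getStorageSize, instead of
raw long byte counts. A short sketch of the reading side, assuming the string
default that ScmConfigKeys carries after this change (the wrapping class and
method are illustrative):

  import org.apache.hadoop.conf.StorageUnit;
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.scm.ScmConfigKeys;

  public final class StorageSizeSketch {
    static long segmentSizeBytes(OzoneConfiguration conf) {
      // Accepts values such as "16KB" or "1GB" and converts them to bytes.
      return (long) conf.getStorageSize(
          ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
          ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
          StorageUnit.BYTES);
    }
  }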


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14e1a0a3/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 5592c1d..a824c29 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
@@ -103,7 +104,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 LOG.debug("Connecting to server Port : " + dn.getIpAddress());
 ManagedChannel channel =
 NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext()
-
.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
+.maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
 .build();
 XceiverClientProtocolServiceStub asyncStub =
 XceiverClientProtocolServiceGrpc.newStub(channel);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14e1a0a3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 896caed..6733b8e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -73,12 +73,12 @@ public final class ScmConfigKeys {
   = 10;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  16 * 1024;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
+  "16KB";
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
-  public static final int
-  DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 

hadoop git commit: HDDS-860. Fix TestDataValidate unit tests. Contributed by Shashikant Banerjee.

2018-11-20 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 14e1a0a3d -> c8b3dfa62


HDDS-860. Fix TestDataValidate unit tests. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8b3dfa6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8b3dfa6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8b3dfa6

Branch: refs/heads/trunk
Commit: c8b3dfa6250cd74fb3e449748595117b244089da
Parents: 14e1a0a
Author: Shashikant Banerjee 
Authored: Wed Nov 21 11:28:20 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 21 11:28:20 2018 +0530

--
 .../java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8b3dfa6/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 5d72f557..d85b829 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -269,7 +269,7 @@ public final class RandomKeyGenerator implements 
Callable {
 
 processor.shutdown();
 processor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
-
+completed = true;
 progressbar.shutdown();
 
 if (validateWrites) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-865. GrpcXceiverService is added twice to GRPC netty server. Contributed by Xiaoyu Yao.

2018-11-21 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 34b6fa7d6 -> f42b2dfdf


HDDS-865. GrpcXceiverService is added twice to GRPC netty server. Contributed 
by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f42b2dfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f42b2dfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f42b2dfd

Branch: refs/heads/trunk
Commit: f42b2dfdf12c2f2a1952e2492f8d1b7b358013f6
Parents: 34b6fa7
Author: Shashikant Banerjee 
Authored: Wed Nov 21 23:43:01 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 21 23:43:01 2018 +0530

--
 .../container/common/transport/server/XceiverServerGrpc.java | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f42b2dfd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index be00c8a..4f7799d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -90,10 +90,6 @@ public final class XceiverServerGrpc implements 
XceiverServerSpi {
 }
 datanodeDetails.setPort(
 DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
-server = ((NettyServerBuilder) ServerBuilder.forPort(port))
-.maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
-.addService(new GrpcXceiverService(dispatcher))
-.build();
 NettyServerBuilder nettyServerBuilder =
 ((NettyServerBuilder) ServerBuilder.forPort(port))
 .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
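
For reference, a minimal sketch of the intended end state: the Netty gRPC server is built once
and GrpcXceiverService is registered exactly once, on the builder that is actually started.
Plain io.grpc types are used here for illustration; the real code goes through the
Ratis-shaded copies.

import io.grpc.BindableService;
import io.grpc.Server;
import io.grpc.netty.NettyServerBuilder;
import java.io.IOException;

final class ServerSketch {
  static Server start(int port, BindableService xceiverService, int maxMessageSize)
      throws IOException {
    // Build a single server and add the service once; the block removed above
    // built a second, redundant server with the same service registered.
    Server server = NettyServerBuilder.forPort(port)
        .maxInboundMessageSize(maxMessageSize)
        .addService(xceiverService)
        .build();
    return server.start();
  }
}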


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-866. Handle RaftRetryFailureException in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-22 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 176bb3f81 -> ee44b069c


HDDS-866. Handle RaftRetryFailureException in OzoneClient. Contributed by 
Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee44b069
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee44b069
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee44b069

Branch: refs/heads/trunk
Commit: ee44b069c66703cd6c804c492647371fa0aa3501
Parents: 176bb3f
Author: Shashikant Banerjee 
Authored: Thu Nov 22 15:02:07 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 22 15:02:07 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientRatis.java |  9 +++-
 .../container/common/impl/HddsDispatcher.java   | 56 ++--
 .../server/ratis/XceiverServerRatis.java| 14 ++---
 .../container/keyvalue/KeyValueContainer.java   |  3 ++
 hadoop-hdds/pom.xml |  2 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |  9 ++--
 .../client/rpc/TestFailureHandlingByClient.java | 10 +---
 hadoop-ozone/pom.xml|  2 +-
 8 files changed, 54 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee44b069/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index b238a09..8a07526 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.ratis.proto.RaftProtos;
+import org.apache.ratis.protocol.RaftRetryFailureException;
 import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.thirdparty.com.google.protobuf
 .InvalidProtocolBufferException;
@@ -196,10 +197,16 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 new ArrayList<>();
 CompletableFuture containerCommandResponse =
 raftClientReply.whenComplete((reply, e) -> LOG
-.debug("received reply {} for request: {} exception: {}", request,
+.info("received reply {} for request: {} exception: {}", request,
 reply, e))
 .thenApply(reply -> {
   try {
+// we need to handle RaftRetryFailure Exception
+RaftRetryFailureException raftRetryFailureException =
+reply.getRetryFailureException();
+if (raftRetryFailureException != null) {
+  throw new CompletionException(raftRetryFailureException);
+}
 ContainerCommandResponseProto response =
 ContainerCommandResponseProto
 .parseFrom(reply.getMessage().getContent());
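
The shape of that handling, as a self-contained sketch with stand-in types (the real code works
on Ratis' RaftClientReply and Ozone's ContainerCommandResponseProto): if the reply carries a
retry failure, the future completes exceptionally instead of the reply payload being parsed.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

final class RetryFailureSketch {
  // Hypothetical stand-in for the Ratis reply type.
  interface Reply {
    RuntimeException getRetryFailureException();
    byte[] getContent();
  }

  static CompletableFuture<byte[]> toResponse(CompletableFuture<Reply> replyFuture) {
    return replyFuture.thenApply(reply -> {
      RuntimeException retryFailure = reply.getRetryFailureException();
      if (retryFailure != null) {
        // Surface the exhausted-retries condition to the caller; wrapping it in
        // CompletionException makes the returned future complete exceptionally.
        throw new CompletionException(retryFailure);
      }
      return reply.getContent();
    });
  }
}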

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee44b069/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index b8669fb..24ba784 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -284,7 +284,12 @@ public class HddsDispatcher implements 
ContainerDispatcher, Auditor {
   @Override
   public void validateContainerCommand(
   ContainerCommandRequestProto msg) throws StorageContainerException {
-ContainerType containerType = msg.getCreateContainer().getContainerType();
+long containerID = msg.getContainerID();
+Container container = getContainer(containerID);
+if (container == null) {
+  return;
+}
+ContainerType containerType = container.getContainerType();
 ContainerProtos.Type cmdType = msg.getCmdType();
 AuditAction action =
 ContainerCommandRequestPBHelper.getAuditAction(cmdType);
@@ -299,35 +304,30 @@ public class HddsDispatcher implements 
ContainerDispatcher, Auditor {
   audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
   throw ex;
 }
-long containerID = msg.getContainerID();
-Container conta

hadoop git commit: HDDS-869. Fix log message in XceiverClientRatis#sendCommandAsync. Contributed by Lokesh Jain.

2018-11-22 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk ff2484906 -> 95d526a27


HDDS-869. Fix log message in XceiverClientRatis#sendCommandAsync. Contributed 
by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95d526a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95d526a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95d526a2

Branch: refs/heads/trunk
Commit: 95d526a270033462718df6e11a584b727e5c532b
Parents: ff24849
Author: Shashikant Banerjee 
Authored: Thu Nov 22 18:39:14 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 22 18:39:14 2018 +0530

--
 .../java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java  | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95d526a2/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 8a07526..28d3e7a 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -196,9 +196,11 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 Collection commitInfos =
 new ArrayList<>();
 CompletableFuture containerCommandResponse =
-raftClientReply.whenComplete((reply, e) -> LOG
-.info("received reply {} for request: {} exception: {}", request,
-reply, e))
+raftClientReply.whenComplete((reply, e) -> LOG.debug(
+"received reply {} for request: cmdType={} containerID={}"
++ " pipelineID={} traceID={} exception: {}", reply,
+request.getCmdType(), request.getContainerID(),
+request.getPipelineID(), request.getTraceID(), e))
 .thenApply(reply -> {
   try {
 // we need to handle RaftRetryFailure Exception
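
For reference, the general pattern this fix moves to: a parameterized DEBUG line naming only the
identifying fields, rather than an INFO line that stringifies the whole protobuf request (which
may embed chunk payloads). Sketch only; names are illustrative.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class ReplyLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ReplyLoggingSketch.class);

  static void logReply(String cmdType, long containerId, String traceId,
      Throwable error) {
    // SLF4J fills the {} placeholders and, because the last argument is a
    // Throwable, also prints its stack trace when an error is present.
    LOG.debug("received reply for cmdType={} containerID={} traceID={}",
        cmdType, containerId, traceId, error);
  }
}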


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-850. ReadStateMachineData hits OverlappingFileLockException in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-11-29 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7eb0d3a32 -> 5e102f9aa


HDDS-850. ReadStateMachineData hits OverlappingFileLockException in 
ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e102f9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e102f9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e102f9a

Branch: refs/heads/trunk
Commit: 5e102f9aa54d3057ef5f0755d45428f22a24990b
Parents: 7eb0d3a
Author: Shashikant Banerjee 
Authored: Thu Nov 29 22:20:08 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 29 22:20:08 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   8 ++
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   9 ++
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../common/src/main/resources/ozone-default.xml |   8 ++
 .../server/ratis/ContainerStateMachine.java | 134 +++
 .../server/ratis/XceiverServerRatis.java|  14 +-
 .../container/keyvalue/KeyValueHandler.java |   7 +-
 .../keyvalue/impl/ChunkManagerImpl.java |  11 +-
 .../keyvalue/interfaces/ChunkManager.java   |   5 +-
 .../keyvalue/TestChunkManagerImpl.java  |   6 +-
 .../common/impl/TestContainerPersistence.java   |  11 +-
 11 files changed, 143 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e102f9a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 6733b8e..062b101 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -93,6 +93,14 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
   "dfs.container.ratis.log.queue.size";
   public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
+
+  // expiry interval stateMachineData cache entry inside containerStateMachine
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
+  "dfs.container.ratis.statemachine.cache.expiry.interval";
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
+  "10s";
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration
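
The new interval feeds an expiring cache of stateMachineData inside ContainerStateMachine, so
ReadStateMachineData can be served from memory instead of re-opening (and re-locking) the
temporary chunk file. A rough sketch of such a cache, using the Guava CacheBuilder already on
Hadoop's classpath; the key and value types and the eviction policy shown here are
illustrative, not the exact implementation.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;

final class StateMachineDataCacheSketch {
  // Entries keyed by Raft log index; evicted 10s after last access
  // (dfs.container.ratis.statemachine.cache.expiry.interval).
  private final Cache<Long, byte[]> cache = CacheBuilder.newBuilder()
      .expireAfterAccess(10, TimeUnit.SECONDS)
      .build();

  void put(long logIndex, byte[] chunkData) {
    cache.put(logIndex, chunkData);
  }

  byte[] get(long logIndex) {
    return cache.getIfPresent(logIndex);
  }
}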

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e102f9a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 879f773..df233f7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -249,6 +249,15 @@ public final class OzoneConfigKeys {
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
   ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
 
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
+  ScmConfigKeys.
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
+  ScmConfigKeys.
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT;
+
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
   "dfs.container.ratis.datanode.storage.dir";
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e102f9a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 3695b6b..5237af8 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -392,6 +392,7 @@ message  WriteChunkResponseProto {
 message  ReadChunkRequestProto  {
   required Datanod

hadoop git commit: HDDS-887. Add DispatcherContext info to Dispatcher from containerStateMachine. Contributed by Shashikant Banerjee.

2018-12-01 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk d15dc4365 -> 5a3c7714c


HDDS-887. Add DispatcherContext info to Dispatcher from containerStateMachine. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a3c7714
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a3c7714
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a3c7714

Branch: refs/heads/trunk
Commit: 5a3c7714c4d7822827ec365ea187fa8f43eb0e45
Parents: d15dc43
Author: Shashikant Banerjee 
Authored: Sun Dec 2 08:00:35 2018 +0530
Committer: Shashikant Banerjee 
Committed: Sun Dec 2 08:00:35 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |   8 --
 .../container/common/impl/HddsDispatcher.java   |   8 +-
 .../common/interfaces/ContainerDispatcher.java  |   5 +-
 .../container/common/interfaces/Handler.java|   4 +-
 .../transport/server/GrpcXceiverService.java|   3 +-
 .../transport/server/XceiverServerGrpc.java |   2 +-
 .../server/ratis/ContainerStateMachine.java | 120 +++--
 .../server/ratis/DispatcherContext.java | 133 +++
 .../container/keyvalue/KeyValueHandler.java |  75 +++
 .../container/keyvalue/helpers/BlockUtils.java  |   8 +-
 .../keyvalue/helpers/SmallFileUtils.java|  10 +-
 .../keyvalue/impl/ChunkManagerImpl.java |   4 +-
 .../keyvalue/interfaces/ChunkManager.java   |   5 +-
 .../common/impl/TestHddsDispatcher.java |  14 +-
 .../keyvalue/TestChunkManagerImpl.java  |  17 +--
 .../container/keyvalue/TestKeyValueHandler.java |  48 +++
 .../common/impl/TestContainerPersistence.java   |  20 +--
 .../transport/server/ratis/TestCSMMetrics.java  |   3 +-
 .../container/server/TestContainerServer.java   |   6 +-
 .../genesis/BenchMarkDatanodeDispatcher.java|  16 +--
 20 files changed, 321 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a3c7714/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 5237af8..661d910 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -373,17 +373,10 @@ enum ChecksumType {
 MD5 = 5;
 }
 
-enum Stage {
-WRITE_DATA = 1;
-COMMIT_DATA = 2;
-COMBINED = 3;
-}
-
 message  WriteChunkRequestProto  {
   required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
   optional bytes data = 3;
-  optional Stage stage = 4 [default = COMBINED];
 }
 
 message  WriteChunkResponseProto {
@@ -392,7 +385,6 @@ message  WriteChunkResponseProto {
 message  ReadChunkRequestProto  {
   required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
-  optional bool readFromTmpFile = 3 [default = false];
 }
 
 message  ReadChunkResponseProto {
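
In outline, the write-stage and read-from-tmp-file hints removed from the wire format above now
travel server-side in a context object that the Ratis state machine builds per call and hands
to the dispatcher. A simplified sketch of such a context; the field set and builder shape are
illustrative, not the exact class.

final class DispatcherContextSketch {
  enum WriteChunkStage { WRITE_DATA, COMMIT_DATA, COMBINED }

  private final WriteChunkStage stage;     // which half of the two-phase chunk write
  private final boolean readFromTmpFile;   // serve reads from the uncommitted tmp file
  private final long term;                 // Raft term of the entry being applied
  private final long logIndex;             // Raft log index of that entry

  private DispatcherContextSketch(Builder b) {
    stage = b.stage;
    readFromTmpFile = b.readFromTmpFile;
    term = b.term;
    logIndex = b.logIndex;
  }

  static final class Builder {
    private WriteChunkStage stage = WriteChunkStage.COMBINED;
    private boolean readFromTmpFile;
    private long term;
    private long logIndex;

    Builder setStage(WriteChunkStage s) { stage = s; return this; }
    Builder setReadFromTmpFile(boolean v) { readFromTmpFile = v; return this; }
    Builder setTerm(long t) { term = t; return this; }
    Builder setLogIndex(long i) { logIndex = i; return this; }
    DispatcherContextSketch build() { return new DispatcherContextSketch(this); }
  }
}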

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a3c7714/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 352cc86..c5c51a3 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -47,6 +47,8 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis
+.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -133,7 +135,7 @@ public class HddsDispatcher implements ContainerDispatcher, 
Auditor {
 
   @Override
   public ContainerCommandResponseProto dispatch(
-  ContainerCommandRequestProto msg) {
+  ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
 Preconditions.checkNotNull(msg);
 LOG.trace("Command {}, trace ID: {} ", msg.getCmdTy

hadoop git commit: HDDS-882. Provide a config to optionally turn on/off the sync flag during chunk writes. Contributed by Shashikant Banerjee.

2018-12-01 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a3c7714c -> 8f3e12ff0


HDDS-882. Provide a config to optionally turn on/off the sync flag during chunk 
writes. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f3e12ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f3e12ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f3e12ff

Branch: refs/heads/trunk
Commit: 8f3e12ff07f5a8490af23f3ca231f97b381682e5
Parents: 5a3c771
Author: Shashikant Banerjee 
Authored: Sun Dec 2 08:06:24 2018 +0530
Committer: Shashikant Banerjee 
Committed: Sun Dec 2 08:06:24 2018 +0530

--
 .../java/org/apache/hadoop/ozone/OzoneConfigKeys.java|  3 +++
 hadoop-hdds/common/src/main/resources/ozone-default.xml  |  8 
 .../hadoop/ozone/container/keyvalue/KeyValueHandler.java |  7 ++-
 .../ozone/container/keyvalue/helpers/ChunkUtils.java | 11 ---
 .../ozone/container/keyvalue/impl/ChunkManagerImpl.java  | 10 --
 .../ozone/container/keyvalue/TestChunkManagerImpl.java   |  2 +-
 .../container/common/impl/TestContainerPersistence.java  |  2 +-
 7 files changed, 35 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f3e12ff/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index df233f7..496861c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -52,6 +52,9 @@ public final class OzoneConfigKeys {
   public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
   false;
 
+  public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
+  "dfs.container.chunk.write.sync";
+  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = true;
   /**
* Ratis Port where containers listen to.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f3e12ff/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 0545805..edce616 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -53,6 +53,14 @@
 
   </property>
   <property>
+    <name>dfs.container.chunk.write.sync</name>
+    <value>true</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>Determines whether the chunk writes in the container happen as
+      sync I/O or buffered I/O operation.
+    </description>
+  </property>
+  <property>
     <name>dfs.container.ratis.statemachinedata.sync.timeout</name>
     <value>10s</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
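
In effect, the flag decides how the chunk file is opened on the datanode. A minimal sketch of
the two modes using plain NIO file channels (the real ChunkManager plumbs the flag through its
own helpers); this is an illustration of the behaviour, not the Ozone code itself.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class ChunkWriteSketch {
  static void writeChunk(Path chunkFile, ByteBuffer data, boolean syncWrite)
      throws IOException {
    // With the flag on, every write is forced to disk before the call returns;
    // with it off, data may sit in OS buffers until an explicit force()/close().
    FileChannel channel = syncWrite
        ? FileChannel.open(chunkFile, StandardOpenOption.CREATE,
            StandardOpenOption.WRITE, StandardOpenOption.SYNC)
        : FileChannel.open(chunkFile, StandardOpenOption.CREATE,
            StandardOpenOption.WRITE);
    try {
      while (data.hasRemaining()) {
        channel.write(data);
      }
    } finally {
      channel.close();
    }
  }
}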

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f3e12ff/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index b4cfcd0..5130253 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -49,6 +49,7 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
@@ -111,13 +112,17 @@ public class KeyValueHandler extends Handler {
   private final VolumeChoosingPolicy volumeChoosingPolicy;
   private final long maxContainerSize;
   private final AutoCloseableLock handlerLock;
+  private final boolean doSyncWrite;
 
   public KeyValueHandler(Configuration config, StateContext context,
   ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics) {
 super(config, context, contSet, volSet, metrics);
 containerType = ContainerType.KeyValueContainer;
 blockManager = new BlockManagerImpl(config);
-chunk

hadoop git commit: HDDS-539. Ozone datanode command ignores the invalid options. Contributed by Vinicius Higa Murakami.

2018-12-18 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 94b368f29 -> ee10ba26d


HDDS-539. Ozone datanode command ignores the invalid options. Contributed by 
Vinicius Higa Murakami.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee10ba26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee10ba26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee10ba26

Branch: refs/heads/trunk
Commit: ee10ba26dd1e1cbddef7f8be19da595e70e3108e
Parents: 94b368f
Author: Shashikant Banerjee 
Authored: Tue Dec 18 16:10:27 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Dec 18 16:10:27 2018 +0530

--
 .../hadoop/ozone/HddsDatanodeService.java   | 118 +
 .../hadoop/ozone/TestHddsDatanodeService.java   |   3 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |   6 +-
 .../ozone/ozShell/TestOzoneDatanodeShell.java   | 250 +++
 4 files changed, 325 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee10ba26/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 348196c..fb2eba3 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -1,19 +1,19 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.ozone;
 
@@ -21,20 +21,20 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.cli.GenericCli;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-.DatanodeStateMachine;
-import org.apache.hadoop.util.GenericOptionsParser;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Command;
 
 import java.io.File;
 import java.io.IOException;
@@ -48,12 +48,16 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 /**
  * Datanode service plugin to start the HDDS container services.
  */
-public class HddsDatanodeService implements ServicePlugin {
+
+@Command(name = "ozone datanode",
+hidden = true, description = "Start the datanode for ozone",
+versionProvider = HddsVersionProvider.class,
+mixinStandardHelpOptions = true)
+public class HddsDatanodeService extends GenericCli implements ServicePlugin {
 
   private static final Logger LOG = LoggerFactory.getLogger(
   HddsDatanodeService.class);
 
-
   privat
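
The switch from GenericOptionsParser to picocli is what makes invalid options fatal: picocli
rejects any argument it cannot bind instead of silently dropping it. A toy, picocli-4.x-style
illustration of that behaviour, not the actual HddsDatanodeService wiring.

import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;

@Command(name = "datanode", mixinStandardHelpOptions = true,
    description = "Start the datanode for ozone")
class DatanodeCommandSketch implements Callable<Integer> {
  @Override
  public Integer call() {
    // start the datanode plugins / state machine here
    return 0;
  }

  public static void main(String[] args) {
    // "datanode --bogus" now prints an unknown-option error and usage text
    // instead of being ignored the way GenericOptionsParser allowed.
    System.exit(new CommandLine(new DatanodeCommandSketch()).execute(args));
  }
}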

[2/2] hadoop git commit: HDDS-925. Rename ChunkGroupOutputStream to KeyOutputStream and ChunkOutputStream to BlockOutputStream. Contributed by Shashikant Banerjee.

2018-12-18 Thread shashikant
HDDS-925. Rename ChunkGroupOutputStream to KeyOutputStream and 
ChunkOutputStream to BlockOutputStream. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ff1c46d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ff1c46d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ff1c46d

Branch: refs/heads/trunk
Commit: 4ff1c46d95c2b1bc645fe12004eb7c434b8f7b74
Parents: ee10ba2
Author: Shashikant Banerjee 
Authored: Tue Dec 18 18:03:46 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Dec 18 18:03:46 2018 +0530

--
 .../hdds/scm/storage/BlockOutputStream.java | 627 ++
 .../hdds/scm/storage/ChunkOutputStream.java | 627 --
 .../ozone/client/io/ChunkGroupOutputStream.java | 812 ---
 .../hadoop/ozone/client/io/KeyOutputStream.java | 812 +++
 .../ozone/client/io/OzoneOutputStream.java  |   8 +-
 .../hadoop/ozone/client/rpc/RpcClient.java  |  10 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  54 +-
 .../rpc/TestContainerStateMachineFailures.java  |   6 +-
 .../client/rpc/TestFailureHandlingByClient.java |  20 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   6 +-
 .../web/storage/DistributedStorageHandler.java  |   6 +-
 .../hadoop/ozone/om/TestChunkStreams.java   |   2 +-
 .../hadoop/fs/ozone/OzoneFSOutputStream.java|   6 +-
 13 files changed, 1498 insertions(+), 1498 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ff1c46d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
new file mode 100644
index 000..32c6b6a
--- /dev/null
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -0,0 +1,627 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientAsyncReply;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
+import org.apache.hadoop.ozone.common.OzoneChecksumException;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+import java.util.UUID;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.concurrent.*;
+
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
+.putBlockAsync;
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
+.writeChunkAsync;
+
+/**
+ * An {@link OutputStream} used by the REST service in combination with the
+ * SCMClient to write the value of a key to a sequence
+ * of container chunks.  Writes are buffered locally and periodically written 
to
+ * the container as a new chunk.  In order to preserve the semantics that
+ * replacement of a pre-existing key is atomic, each instance of the stream has
+ * an internal unique identifier.  Th

[1/2] hadoop git commit: HDDS-925. Rename ChunkGroupOutputStream to KeyOutputStream and ChunkOutputStream to BlockOutputStream. Contributed by Shashikant Banerjee.

2018-12-18 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk ee10ba26d -> 4ff1c46d9


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ff1c46d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
new file mode 100644
index 000..5e7cb9b
--- /dev/null
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -0,0 +1,812 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.client.io;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
+import org.apache.hadoop.hdds.client.BlockID;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import 
org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+.StorageContainerException;
+import org.apache.hadoop.hdds.scm.protocolPB
+.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.ratis.protocol.RaftRetryFailureException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.ListIterator;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Maintaining a list of ChunkInputStream. Write based on offset.
+ *
+ * Note that this may write to multiple containers in one write call. In case
+ * that first container succeeded but later ones failed, the succeeded writes
+ * are not rolled back.
+ *
+ * TODO : currently not support multi-thread access.
+ */
+public class KeyOutputStream extends OutputStream {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(KeyOutputStream.class);
+
+  // array list's get(index) is O(1)
+  private final ArrayList streamEntries;
+  private int currentStreamIndex;
+  private final OzoneManagerProtocolClientSideTranslatorPB omClient;
+  private final
+  StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
+  private final OmKeyArgs keyArgs;
+  private final long openID;
+  private final XceiverClientManager xceiverClientManager;
+  private final int chunkSize;
+  private final String requestID;
+  private boolean closed;
+  private final long streamBufferFlushSize;
+  private final long streamBufferMaxSize;
+  private final long watchTimeout;
+  private final long blockSize;
+  private final Checksum checksum;
+  private List bufferList;
+  private OmMultipartCommitUploadPartInfo commitUploadPartInfo;
+  /**
+   * A constructor for testing purpose only.
+   */
+  @VisibleForTesting
+  public KeyOutputStream() {
+streamEntries = new ArrayList<>();
+omClient = null;
+scmClient = null;
+keyArgs = null;
+openID = -1;
+xceiverClientManager = null;
+chunkSize = 0;
+requestID = null;
+closed = false;
+streamBufferFlushSize = 0;
+streamBufferMaxSize = 0;
+bufferList = new ArrayList<>(1);
+ByteBuffer buffer = ByteBuffer.allocate(1);
+bufferList.add(buffer);
+watchTimeout = 0;
+blockSize = 0;
+this.checksum = new Checksum();
+  }
+
+  /**
+   * For testing purpose only. Not building output stream from b

hadoop git commit: HDDS-912. Update ozone to latest ratis snapshot build (0.4.0-3b0be02-SNAPSHOT). Contributed by Lokesh Jain.

2018-12-18 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4ff1c46d9 -> b51e9e431


HDDS-912. Update ozone to latest ratis snapshot build (0.4.0-3b0be02-SNAPSHOT). 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b51e9e43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b51e9e43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b51e9e43

Branch: refs/heads/trunk
Commit: b51e9e431728519a05a347d2e8f39619fe4b1145
Parents: 4ff1c46
Author: Shashikant Banerjee 
Authored: Tue Dec 18 18:21:17 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Dec 18 18:21:17 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 23 +++--
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 23 +++--
 .../main/java/org/apache/ratis/RatisHelper.java |  2 +-
 .../common/src/main/resources/ozone-default.xml | 51 +---
 .../server/ratis/ContainerStateMachine.java | 10 +++-
 .../server/ratis/XceiverServerRatis.java| 32 
 hadoop-hdds/pom.xml |  2 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java   |  2 +-
 .../apache/hadoop/ozone/om/OMConfigKeys.java| 14 +-
 .../org/apache/hadoop/ozone/OzoneTestUtils.java |  2 +-
 .../transport/server/ratis/TestCSMMetrics.java  |  2 +-
 .../ozoneimpl/TestOzoneContainerRatis.java  |  2 +-
 .../container/server/TestContainerServer.java   |  2 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java | 18 ---
 hadoop-ozone/pom.xml|  2 +-
 15 files changed, 145 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51e9e43/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 062b101..38e6cbd 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -78,7 +78,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
   public static final String
-  DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "128MB";
+  DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "16KB";
   public static final String
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
   "dfs.container.ratis.statemachinedata.sync.timeout";
@@ -90,10 +90,23 @@ public final class ScmConfigKeys {
   "dfs.container.ratis.statemachinedata.sync.retries";
   public static final int
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
-  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
-  "dfs.container.ratis.log.queue.size";
-  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
-
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
+  "dfs.container.ratis.log.queue.num-elements";
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
+  1024;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
+  "dfs.container.ratis.log.queue.byte-limit";
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
+  "4GB";
+  public static final String
+  DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
+  "dfs.container.ratis.log.appender.queue.num-elements";
+  public static final int
+  DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
+  public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT 
=
+  "dfs.container.ratis.log.appender.queue.byte-limit";
+  public static final String
+  DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
   // expiry interval stateMachineData cache entry inside containerStateMachine
   public static final String
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51e9e43/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index bac07d5..9c78daf 100644
--- 
a/hadoop-hdds/common

[hadoop] branch trunk updated: HDDS-932. Add blockade Tests for Network partition. Contributed by Nilotpal Nandi.

2019-01-23 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e72e27e  HDDS-932. Add blockade Tests for Network partition. 
Contributed by Nilotpal Nandi.
e72e27e is described below

commit e72e27edd8c1bbff591fcd7cd9318ce7dded35f4
Author: Shashikant Banerjee 
AuthorDate: Wed Jan 23 14:05:49 2019 +0530

HDDS-932. Add blockade Tests for Network partition. Contributed by Nilotpal 
Nandi.
---
 .../src/main/blockade/blockadeUtils/blockade.py|  38 ++-
 .../main/blockade/clusterUtils/cluster_utils.py| 112 ++---
 .../blockade/test_blockade_datanode_isolation.py   |  91 +
 .../{test_blockade.py => test_blockade_flaky.py}   |  10 +-
 4 files changed, 234 insertions(+), 17 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py 
b/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py
index 37c275f..432562e 100644
--- a/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py
+++ b/hadoop-ozone/dist/src/main/blockade/blockadeUtils/blockade.py
@@ -21,6 +21,7 @@ from subprocess import call
 import subprocess
 import logging
 import random
+from clusterUtils.cluster_utils import ClusterUtils
 
 logger = logging.getLogger(__name__)
 
@@ -32,9 +33,13 @@ class Blockade(object):
 call(["blockade", "destroy"])
 
 @classmethod
+def blockade_up(cls):
+call(["blockade", "up"])
+
+@classmethod
 def blockade_status(cls):
-output = call(["blockade", "status"])
-return output
+exit_code, output = ClusterUtils.run_cmd("blockade status")
+return exit_code, output
 
 @classmethod
 def make_flaky(cls, flaky_node, container_list):
@@ -57,3 +62,32 @@ class Blockade(object):
 def blockade_fast_all(cls):
 output = call(["blockade", "fast", "--all"])
 assert output == 0, "fast command failed with exit code=[%s]" % output
+
+@classmethod
+def blockade_create_partition(cls, *args):
+nodes = ""
+for node_list in args:
+nodes = nodes + ','.join(node_list) + " "
+exit_code, output = ClusterUtils.run_cmd("blockade partition %s" % 
nodes)
+assert exit_code == 0, "blockade partition command failed with exit 
code=[%s]" % output
+
+@classmethod
+def blockade_join(cls):
+output = call(["blockade", "join"])
+assert output == 0, "blockade join command failed with exit code=[%s]" 
% output
+
+@classmethod
+def blockade_stop(cls, node, all_nodes=False):
+if all_nodes:
+output = call(["blockade", "stop", "--all"])
+else:
+output = call(["blockade", "stop", node])
+assert output == 0, "blockade stop command failed with exit code=[%s]" 
% output
+
+@classmethod
+def blockade_start(cls, node, all_nodes=False):
+if all_nodes:
+output = call(["blockade", "start", "--all"])
+else:
+output = call(["blockade", "start", node])
+assert output == 0, "blockade start command failed with exit 
code=[%s]" % output
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py 
b/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py
index a45035b..26342c7 100644
--- a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py
+++ b/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py
@@ -21,6 +21,8 @@ from subprocess import call
 import subprocess
 import logging
 import time
+import re
+import yaml
 
 
 logger = logging.getLogger(__name__)
@@ -61,15 +63,101 @@ class ClusterUtils(object):
 def run_freon(cls, docker_compose_file, num_volumes, num_buckets, 
num_keys, key_size,
   replication_type, replication_factor):
 # run freon
-logger.info("Running freon ...")
-output = call(["docker-compose", "-f", docker_compose_file,
-  "exec", "ozoneManager",
-  "/opt/hadoop/bin/ozone",
-  "freon", "rk",
-  "--numOfVolumes", str(num_volumes),
-  "--numOfBuckets", str(num_buckets),
-  "--numOfKeys", str(num_keys),
-

[hadoop] branch trunk updated: HDDS-996. Incorrect data length gets updated in OM by client in case it hits exception in multiple successive block writes. Contributed by Shashikant Banerjee.

2019-01-24 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f3d8265  HDDS-996. Incorrect data length gets updated in OM by client 
in case it hits exception in multiple successive block writes. Contributed by 
Shashikant Banerjee.
f3d8265 is described below

commit f3d8265582df88278dccf02e8e63cf0d2ba5286f
Author: Shashikant Banerjee 
AuthorDate: Thu Jan 24 16:37:05 2019 +0530

HDDS-996. Incorrect data length gets updated in OM by client in case it 
hits exception in multiple successive block writes. Contributed by Shashikant 
Banerjee.
---
 .../org/apache/hadoop/ozone/client/io/KeyOutputStream.java| 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 22efab3..042acee 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -286,7 +286,7 @@ public class KeyOutputStream extends OutputStream {
   BlockOutputStreamEntry current = streamEntries.get(currentStreamIndex);
 
   // length(len) will be in int range if the call is happening through
-  // write API of chunkOutputStream. Length can be in long range if it 
comes
+  // write API of blockOutputStream. Length can be in long range if it 
comes
   // via Exception path.
   int writeLen = Math.min((int)len, (int) current.getRemaining());
   long currentPos = current.getWrittenDataLength();
@@ -302,7 +302,14 @@ public class KeyOutputStream extends OutputStream {
 || retryFailure) {
   // for the current iteration, totalDataWritten - currentPos gives the
   // amount of data already written to the buffer
-  writeLen = (int) (current.getWrittenDataLength() - currentPos);
+
+  // In the retryPath, the total data to be written will always be 
equal
+  // to or less than the max length of the buffer allocated.
+  // The len specified here is the combined sum of the data length of
+  // the buffers
+  Preconditions.checkState(!retry || len <= streamBufferMaxSize);
+  writeLen = retry ? (int) len :
+  (int) (current.getWrittenDataLength() - currentPos);
   LOG.debug("writeLen {}, total len {}", writeLen, len);
   handleException(current, currentStreamIndex, retryFailure);
 } else {
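
Roughly, the corrected accounting distinguishes the two failure paths: on the first exception
only the data buffered during the current call is re-driven, while on a retry the whole
buffered length, which by construction fits within streamBufferMaxSize, is written again, so
the length reported to OM stays consistent. A stand-alone sketch with purely illustrative
numbers; it mirrors only the branch added above, not the full stream logic.

final class WriteLenSketch {
  static int writeLen(boolean retry, long len, long writtenDataLength,
      long currentPos, long streamBufferMaxSize) {
    if (retry && len > streamBufferMaxSize) {
      // In the retry path the data being re-driven always fits in the buffer.
      throw new IllegalStateException("retried length exceeds buffer size");
    }
    return retry
        ? (int) len                                // retry: replay the buffered length
        : (int) (writtenDataLength - currentPos);  // first failure: only this call's delta
  }

  public static void main(String[] args) {
    long mb = 1024 * 1024;
    // First failure: 3 MB buffered, 1 MB written before this call -> replay 2 MB.
    System.out.println(writeLen(false, 3 * mb, 3 * mb, 1 * mb, 4 * mb));
    // Retry of that buffered data: the 2 MB handed back in is written again.
    System.out.println(writeLen(true, 2 * mb, 2 * mb, 0, 4 * mb));
  }
}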


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1004. SCMContainerManager#updateContainerStateInternal fails for QUASI_CLOSE and FORCE_CLOSE events. Contributed by Lokesh Jain.

2019-01-29 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5d578d0  HDDS-1004. SCMContainerManager#updateContainerStateInternal 
fails for QUASI_CLOSE and FORCE_CLOSE events. Contributed by Lokesh Jain.
5d578d0 is described below

commit 5d578d0c4a0d9184dc5d54589ecafc91c0ec16cc
Author: Shashikant Banerjee 
AuthorDate: Tue Jan 29 14:11:56 2019 +0530

HDDS-1004. SCMContainerManager#updateContainerStateInternal fails for 
QUASI_CLOSE and FORCE_CLOSE events. Contributed by Lokesh Jain.
---
 .../hdds/scm/container/ContainerReportHandler.java | 25 +++---
 .../hdds/scm/container/SCMContainerManager.java| 21 --
 .../hdds/scm/pipeline/RatisPipelineUtils.java  |  2 +-
 3 files changed, 13 insertions(+), 35 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 0170caa..4500786 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -127,8 +127,8 @@ public class ContainerReportHandler implements
 }
   });
 } catch (ContainerNotFoundException e) {
-  LOG.warn("Cannot remove container replica, container {} not found",
-  id);
+  LOG.warn("Cannot remove container replica, container {} not found 
{}",
+  id, e);
 }
   }
 
@@ -140,8 +140,8 @@ public class ContainerReportHandler implements
   missingReplicas.forEach(id -> checkReplicationState(id, publisher));
 
 } catch (NodeNotFoundException ex) {
-  LOG.error("Received container report from unknown datanode {}",
-  datanodeDetails);
+  LOG.error("Received container report from unknown datanode {} {}",
+  datanodeDetails, ex);
 }
 
   }
@@ -170,12 +170,13 @@ public class ContainerReportHandler implements
   containerInfo.getContainerID());
 }
   } catch (ContainerNotFoundException e) {
-LOG.error("Received container report for an unknown container {} from" 
+
-" datanode {}", replicaProto.getContainerID(), 
datanodeDetails);
+LOG.error("Received container report for an unknown container {} from"
++ " datanode {} {}", replicaProto.getContainerID(),
+datanodeDetails, e);
   } catch (IOException e) {
-LOG.error("Exception while processing container report for container" +
-" {} from datanode {}",
-replicaProto.getContainerID(), datanodeDetails);
+LOG.error("Exception while processing container report for container"
++ " {} from datanode {} {}", replicaProto.getContainerID(),
+datanodeDetails, e);
   }
 }
 if (pendingDeleteStatusList.getNumPendingDeletes() > 0) {
@@ -190,10 +191,8 @@ public class ContainerReportHandler implements
   ContainerInfo container = containerManager.getContainer(containerID);
   replicateIfNeeded(container, publisher);
 } catch (ContainerNotFoundException ex) {
-  LOG.warn(
-  "Container is missing from containerStateManager. Can't request "
-  + "replication. {}",
-  containerID);
+  LOG.warn("Container is missing from containerStateManager. Can't request 
"
+  + "replication. {} {}", containerID, ex);
 }
 
   }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 6c7031d..6ab4cdf 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -313,27 +313,6 @@ public class SCMContainerManager implements 
ContainerManager {
 
   private ContainerInfo updateContainerStateInternal(ContainerID containerID,
   HddsProtos.LifeCycleEvent event) throws IOException {
-// Refactor the below code for better clarity.
-switch (event) {
-case FINALIZE:
-  // TODO: we don't need a lease manager here for closing as the
-  // container report will include the container state after HDFS-13008
-  // If a client failed to update the container close state, DN container
-  // report from 3 DNs wil

[hadoop] branch trunk updated: HDDS-1037. Fix the block discard logic in Ozone client. Contributed by Shashikant Banerjee.

2019-02-01 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4123353  HDDS-1037. Fix the block discard logic in Ozone client. 
Contributed by Shashikant Banerjee.
4123353 is described below

commit 4123353151c25d95d45f765d57094f5c8e21238c
Author: Shashikant Banerjee 
AuthorDate: Fri Feb 1 20:44:48 2019 +0530

HDDS-1037. Fix the block discard logic in Ozone client. Contributed by 
Shashikant Banerjee.
---
 .../main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 042acee..af39631 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -338,8 +338,9 @@ public class KeyOutputStream extends OutputStream {
   ListIterator streamEntryIterator =
   streamEntries.listIterator(currentStreamIndex);
   while (streamEntryIterator.hasNext()) {
-if (streamEntryIterator.next().getBlockID().getContainerID()
-== containerID) {
+BlockOutputStreamEntry streamEntry = streamEntryIterator.next();
+if (streamEntry.getBlockID().getContainerID()
+== containerID && streamEntry.getCurrentPosition() == 0) {
   streamEntryIterator.remove();
 }
   }
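
For context, the fix above narrows the discard path so that pre-allocated blocks are dropped only when nothing has been written to them yet. The fragment below is a simplified, self-contained sketch of that condition; the Entry class and method names are made-up stand-ins, not the real KeyOutputStream/BlockOutputStreamEntry API:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class DiscardBlocksSketch {
  // Hypothetical stand-in for BlockOutputStreamEntry.
  static class Entry {
    final long containerId;
    final long currentPosition;
    Entry(long containerId, long currentPosition) {
      this.containerId = containerId;
      this.currentPosition = currentPosition;
    }
  }

  static void discardPreallocated(List<Entry> streamEntries, long failedContainer) {
    Iterator<Entry> it = streamEntries.iterator();
    while (it.hasNext()) {
      Entry e = it.next();
      // Entries that already hold data must be kept so written bytes are not
      // silently dropped; only untouched pre-allocated blocks are removed.
      if (e.containerId == failedContainer && e.currentPosition == 0) {
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    List<Entry> entries = new ArrayList<>(Arrays.asList(
        new Entry(1, 512),   // has data -> keep
        new Entry(1, 0),     // untouched -> discard
        new Entry(2, 0)));   // different container -> keep
    discardPreallocated(entries, 1);
    System.out.println("entries left: " + entries.size());  // 2
  }
}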


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1026. Reads should fail over to alternate replica. Contributed by Shashikant Banerjee.

2019-02-09 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 965d26c  HDDS-1026. Reads should fail over to alternate replica. 
Contributed by Shashikant Banerjee.
965d26c is described below

commit 965d26c9c758bb0211cb918e95fa661194e771d3
Author: Shashikant Banerjee 
AuthorDate: Sun Feb 10 10:53:16 2019 +0530

HDDS-1026. Reads should fail over to alternate replica. Contributed by 
Shashikant Banerjee.
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  51 +++--
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   9 +-
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |  72 +---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |   6 +-
 ...ientAsyncReply.java => XceiverClientReply.java} |  23 +++-
 .../apache/hadoop/hdds/scm/XceiverClientSpi.java   |  27 -
 .../hdds/scm/storage/ContainerProtocolCalls.java   |  23 ++--
 .../client/rpc/TestOzoneRpcClientAbstract.java | 126 +
 8 files changed, 266 insertions(+), 71 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 5c8ca26..6fa54a5 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -57,6 +57,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
 
 /**
  * A Client for the storageContainer protocol.
@@ -198,11 +199,27 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   @Override
   public ContainerCommandResponseProto sendCommand(
   ContainerCommandRequestProto request) throws IOException {
-return sendCommandWithRetry(request);
+try {
+  XceiverClientReply reply;
+  reply = sendCommandWithRetry(request, null);
+  ContainerCommandResponseProto responseProto = reply.getResponse().get();
+  return responseProto;
+} catch (ExecutionException | InterruptedException e) {
+  throw new IOException("Failed to execute command " + request, e);
+}
   }
 
-  public ContainerCommandResponseProto sendCommandWithRetry(
-  ContainerCommandRequestProto request) throws IOException {
+  @Override
+  public XceiverClientReply sendCommand(
+  ContainerCommandRequestProto request, List excludeDns)
+  throws IOException {
+Preconditions.checkState(HddsUtils.isReadOnly(request));
+return sendCommandWithRetry(request, excludeDns);
+  }
+
+  private XceiverClientReply sendCommandWithRetry(
+  ContainerCommandRequestProto request, List excludeDns)
+  throws IOException {
 ContainerCommandResponseProto responseProto = null;
 
 // In case of an exception or an error, we will try to read from the
@@ -211,13 +228,24 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 // TODO: cache the correct leader info in here, so that any subsequent 
calls
 // should first go to leader
 List dns = pipeline.getNodes();
-for (DatanodeDetails dn : dns) {
+DatanodeDetails datanode = null;
+List healthyDns =
+excludeDns != null ? dns.stream().filter(dnId -> {
+  for (UUID excludeId : excludeDns) {
+if (dnId.getUuid().equals(excludeId)) {
+  return false;
+}
+  }
+  return true;
+}).collect(Collectors.toList()) : dns;
+for (DatanodeDetails dn : healthyDns) {
   try {
 LOG.debug("Executing command " + request + " on datanode " + dn);
 // In case the command gets retried on a 2nd datanode,
 // sendCommandAsyncCall will create a new channel and async stub
 // in case these don't exist for the specific datanode.
 responseProto = sendCommandAsync(request, dn).getResponse().get();
+datanode = dn;
 if (responseProto.getResult() == ContainerProtos.Result.SUCCESS) {
   break;
 }
@@ -226,14 +254,15 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 .getUuidString(), e);
 if (Status.fromThrowable(e.getCause()).getCode()
 == Status.UNAUTHENTICATED.getCode()) {
-  throw new SCMSecurityException("Failed to authenticate with " +
-  "GRPC XceiverServer with Ozone block token.");
+  throw new SCMSecurityException("Failed to authenticate with "
+  + "GRPC XceiverServer with Ozone block token.");
 }
   }
 }
 
 if (responseProto != null) {
-  return responseProto;
+  return n
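
To make the failover flow above easier to follow, here is a simplified, self-contained sketch of the same idea: datanodes whose UUIDs are on the exclude list are filtered out, and the remaining replicas are tried in order until a read succeeds. The Node class and helper names are illustrative, not the actual XceiverClientGrpc types:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;

public class ReadFailoverSketch {
  // Hypothetical stand-in for DatanodeDetails.
  static class Node {
    final UUID uuid;
    Node(UUID uuid) { this.uuid = uuid; }
  }

  static String readFrom(List<Node> pipelineNodes, List<UUID> excludeDns) {
    List<Node> healthy = excludeDns == null ? pipelineNodes
        : pipelineNodes.stream()
            .filter(n -> !excludeDns.contains(n.uuid))
            .collect(Collectors.toList());
    for (Node dn : healthy) {
      try {
        return attemptRead(dn);            // success -> stop retrying
      } catch (RuntimeException e) {
        // remember the failed node and fall through to the next replica
      }
    }
    throw new IllegalStateException("all replicas failed or were excluded");
  }

  private static String attemptRead(Node dn) {
    return "data from " + dn.uuid;         // placeholder for the real gRPC call
  }

  public static void main(String[] args) {
    List<Node> nodes = Arrays.asList(new Node(UUID.randomUUID()),
        new Node(UUID.randomUUID()));
    System.out.println(
        readFrom(nodes, Collections.singletonList(nodes.get(0).uuid)));
  }
}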

[hadoop] branch trunk updated: HDDS-1028. Improve logging in SCMPipelineManager. Contributed by Lokesh Jain.

2019-02-15 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8a426dc  HDDS-1028. Improve logging in SCMPipelineManager. Contributed 
by Lokesh Jain.
8a426dc is described below

commit 8a426dc848d30dd89235f928a001d59160743ec5
Author: Shashikant Banerjee 
AuthorDate: Fri Feb 15 14:38:38 2019 +0530

HDDS-1028. Improve logging in SCMPipelineManager. Contributed by Lokesh 
Jain.
---
 .../org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java  |  2 ++
 .../hadoop/hdds/scm/pipeline/PipelineActionHandler.java|  2 ++
 .../hadoop/hdds/scm/pipeline/PipelineReportHandler.java|  3 +++
 .../hadoop/hdds/scm/pipeline/PipelineStateManager.java | 14 +-
 4 files changed, 20 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
index 4055449..93630f0 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
@@ -56,6 +56,8 @@ public class StaleNodeHandler implements 
EventHandler {
   EventPublisher publisher) {
 Set pipelineIds =
 nodeManager.getPipelines(datanodeDetails);
+LOG.info("Datanode {} moved to stale state. Finalizing its pipelines {}",
+datanodeDetails, pipelineIds);
 for (PipelineID pipelineID : pipelineIds) {
   try {
 Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index c467b9e..94f757b 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -57,6 +57,8 @@ public class PipelineActionHandler
   pipelineID = PipelineID.
   getFromProtobuf(action.getClosePipeline().getPipelineID());
   Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
+  LOG.info("Received pipeline action {} for {} from datanode [}",
+  action.getAction(), pipeline, report.getDatanodeDetails());
   RatisPipelineUtils
   .finalizeAndDestroyPipeline(pipelineManager, pipeline, ozoneConf,
   true);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 2d4bae1..daffe1e 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -84,6 +84,7 @@ public class PipelineReportHandler implements
 }
 
 if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
+  LOGGER.info("Pipeline {} reported by {}", pipeline.getId(), dn);
   pipeline.reportDatanode(dn);
   if (pipeline.isHealthy()) {
 // if all the dns have reported, pipeline can be moved to OPEN state
@@ -94,6 +95,8 @@ public class PipelineReportHandler implements
   if (numContainers == 0) {
 // since all the containers have been closed the pipeline can be
 // destroyed
+LOGGER.info("Destroying pipeline {} as all containers are closed",
+pipeline);
 RatisPipelineUtils.destroyPipeline(pipelineManager, pipeline, conf);
   }
 } else {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index be8f391..a0ef964 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -23,6 +23,8 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
@@ -37,6 +39,9 @@ import java.util.NavigableSet;
  */
 class PipelineStateManage

[hadoop] branch trunk updated: HDDS-1076. TestSCMNodeManager crashed the jvm. Contributed by Lokesh Jain.

2019-02-15 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new de934ba  HDDS-1076. TestSCMNodeManager crashed the jvm. Contributed by 
Lokesh Jain.
de934ba is described below

commit de934ba2dc3da498190a11e0182c7cbf262c0754
Author: Shashikant Banerjee 
AuthorDate: Fri Feb 15 21:13:30 2019 +0530

HDDS-1076. TestSCMNodeManager crashed the jvm. Contributed by Lokesh Jain.
---
 .../java/org/apache/hadoop/utils/Scheduler.java| 101 +
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java  |   6 ++
 .../hadoop/hdds/scm/pipeline/PipelineProvider.java |   2 +
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  12 +++
 .../hdds/scm/pipeline/RatisPipelineUtils.java  |  63 ++---
 .../hdds/scm/pipeline/SCMPipelineManager.java  |   4 +
 .../hdds/scm/pipeline/SimplePipelineProvider.java  |   5 +
 .../hdds/scm/pipeline/TestRatisPipelineUtils.java  |   8 +-
 8 files changed, 163 insertions(+), 38 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/Scheduler.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/Scheduler.java
new file mode 100644
index 000..1171dbf
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/Scheduler.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.utils;
+
+import org.apache.ratis.util.function.CheckedRunnable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This class encapsulates ScheduledExecutorService.
+ */
+public class Scheduler {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(Scheduler.class);
+
+  private ScheduledExecutorService scheduler;
+
+  private volatile boolean isClosed;
+
+  private String threadName;
+
+  /**
+   * Creates a ScheduledExecutorService based on input arguments.
+   * @param threadName - thread name
+   * @param isDaemon - if true the threads in the scheduler are started as
+   * daemon
+   * @param numCoreThreads - number of core threads to maintain in the 
scheduler
+   */
+  public Scheduler(String threadName, boolean isDaemon, int numCoreThreads) {
+scheduler = Executors.newScheduledThreadPool(numCoreThreads, r -> {
+  Thread t = new Thread(r);
+  t.setName(threadName);
+  t.setDaemon(isDaemon);
+  return t;
+});
+this.threadName = threadName;
+isClosed = false;
+  }
+
+  public void schedule(Runnable runnable, long delay, TimeUnit timeUnit) {
+scheduler.schedule(runnable, delay, timeUnit);
+  }
+
+  public void schedule(CheckedRunnable runnable, long delay,
+  TimeUnit timeUnit, Logger logger, String errMsg) {
+scheduler.schedule(() -> {
+  try {
+runnable.run();
+  } catch (Throwable throwable) {
+logger.error(errMsg, throwable);
+  }
+}, delay, timeUnit);
+  }
+
+  public void scheduleWithFixedDelay(Runnable runnable, long initialDelay,
+  long fixedDelay, TimeUnit timeUnit) {
+scheduler
+.scheduleWithFixedDelay(runnable, initialDelay, fixedDelay, timeUnit);
+  }
+
+  public boolean isClosed() {
+return isClosed;
+  }
+
+  /**
+   * Closes the scheduler for further task submission. Any pending tasks not
+   * yet executed are also cancelled. For the executing tasks the scheduler
+   * waits 60 seconds for completion.
+   */
+  public void close() {
+isClosed = true;
+scheduler.shutdownNow();
+try {
+  scheduler.awaitTermination(60, TimeUnit.SECONDS);
+} catch (InterruptedException e) {
+  LOG.info(threadName + " interrupted while waiting for task completion {}",
+  e);
+}
+scheduler = null;
+  }
+}
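
A short usage sketch for the Scheduler class added above (it assumes the class is available as org.apache.hadoop.utils.Scheduler; the thread name, delays, and task bodies are illustrative):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.utils.Scheduler;

public class SchedulerUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    // One daemon core thread named "pipeline-scrubber".
    Scheduler scheduler = new Scheduler("pipeline-scrubber", true, 1);

    // Run a one-shot task after 100 ms.
    scheduler.schedule(() -> System.out.println("one-shot task"), 100,
        TimeUnit.MILLISECONDS);

    // Run a recurring task every 500 ms, starting immediately.
    scheduler.scheduleWithFixedDelay(
        () -> System.out.println("periodic task"), 0, 500, TimeUnit.MILLISECONDS);

    Thread.sleep(1200);
    // Stop accepting work, cancel pending tasks and wait for running ones.
    scheduler.close();
  }
}
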
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelin

[hadoop] branch trunk updated: HDDS-726. Ozone Client should update SCM to move the container out of allocation path in case a write transaction fails. Contributed by Shashikant Banerjee.

2019-03-01 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new de1dae6  HDDS-726. Ozone Client should update SCM to move the 
container out of allocation path in case a write transaction fails. Contributed 
by Shashikant Banerjee.
de1dae6 is described below

commit de1dae64f2458f7ab57ae040b98c1f4e26d54bda
Author: Shashikant Banerjee 
AuthorDate: Fri Mar 1 23:30:28 2019 +0530

HDDS-726. Ozone Client should update SCM to move the container out of 
allocation path in case a write transaction fails. Contributed by Shashikant 
Banerjee.
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  20 +-
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  73 --
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |   5 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |  51 ++--
 .../apache/hadoop/hdds/scm/XceiverClientReply.java |  27 ++-
 .../apache/hadoop/hdds/scm/XceiverClientSpi.java   |  10 +-
 .../scm/container/common/helpers/ExcludeList.java  | 103 
 .../scm/protocol/ScmBlockLocationProtocol.java |   6 +-
 ...lockLocationProtocolClientSideTranslatorPB.java |   4 +-
 .../hdds/scm/storage/ContainerProtocolCalls.java   |   9 +-
 ...lockLocationProtocolServerSideTranslatorPB.java |   4 +-
 .../src/main/proto/ScmBlockLocationProtocol.proto  |   1 +
 hadoop-hdds/common/src/main/proto/hdds.proto   |   6 +
 .../apache/hadoop/hdds/scm/block/BlockManager.java |   6 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java|  29 ++-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  |   5 +
 .../hdds/scm/pipeline/PipelineStateManager.java|   9 +
 .../hadoop/hdds/scm/pipeline/PipelineStateMap.java |  53 
 .../hdds/scm/pipeline/SCMPipelineManager.java  |  15 ++
 .../hdds/scm/server/SCMBlockProtocolServer.java|  10 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java|  24 +-
 .../hadoop/ozone/client/OzoneClientUtils.java  |  15 ++
 .../ozone/client/io/BlockOutputStreamEntry.java|  11 +
 .../hadoop/ozone/client/io/KeyOutputStream.java| 134 +++
 .../ozone/om/protocol/OzoneManagerProtocol.java|   8 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   7 +-
 .../src/main/proto/OzoneManagerProtocol.proto  |   1 +
 .../rpc/TestCloseContainerHandlingByClient.java|  86 ++-
 .../client/rpc/TestFailureHandlingByClient.java| 266 +++--
 .../ozone/container/ContainerTestHelper.java   |  60 +
 .../hadoop/ozone/om/TestOmBlockVersioning.java |   3 +-
 .../org/apache/hadoop/ozone/om/KeyManager.java |   7 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   9 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   6 +-
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 .../ozone/om/ScmBlockLocationTestIngClient.java|   4 +-
 .../hadoop/ozone/om/TestKeyDeletingService.java|   4 +-
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |   6 +-
 .../ozone/genesis/BenchMarkBlockManager.java   |   3 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java  |   6 +-
 40 files changed, 872 insertions(+), 240 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index ddcf966..c068046 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -214,14 +214,14 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
   @Override
   public XceiverClientReply sendCommand(
-  ContainerCommandRequestProto request, List excludeDns)
+  ContainerCommandRequestProto request, List excludeDns)
   throws IOException {
 Preconditions.checkState(HddsUtils.isReadOnly(request));
 return sendCommandWithRetry(request, excludeDns);
   }
 
   private XceiverClientReply sendCommandWithRetry(
-  ContainerCommandRequestProto request, List excludeDns)
+  ContainerCommandRequestProto request, List excludeDns)
   throws IOException {
 ContainerCommandResponseProto responseProto = null;
 
@@ -231,24 +231,24 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 // TODO: cache the correct leader info in here, so that any subsequent 
calls
 // should first go to leader
 List dns = pipeline.getNodes();
-DatanodeDetails datanode = null;
 List healthyDns =
 excludeDns != null ? dns.stream().filter(dnId -> {
-  for (UUID excludeId : excludeDns) {
-if (dnId.getUuid().equals(excludeId)) {
+  for (DatanodeDetails excludeId : excludeDns) {
+if (dnId.equals(excludeId)) {
   return false;
 }
   }
   return t

[hadoop] branch trunk updated: HDDS-935. Avoid creating an already created container on a datanode in case of disk removal followed by datanode restart. Contributed by Shashikant Banerjee.

2019-03-05 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5fcea54  HDDS-935. Avoid creating an already created container on a 
datanode in case of disk removal followed by datanode restart. Contributed by 
Shashikant Banerjee.
5fcea54 is described below

commit 5fcea54a50753a37b61f0e3c6be5a0236f95861a
Author: Shashikant Banerjee 
AuthorDate: Tue Mar 5 22:09:25 2019 +0530

HDDS-935. Avoid creating an already created container on a datanode in case 
of disk removal followed by datanode restart. Contributed by Shashikant 
Banerjee.
---
 .../src/main/proto/DatanodeContainerProtocol.proto |  5 ++
 .../ozone/container/common/impl/ContainerSet.java  | 22 ++-
 .../container/common/impl/HddsDispatcher.java  | 73 +++---
 .../common/interfaces/ContainerDispatcher.java |  8 +++
 .../server/ratis/ContainerStateMachine.java| 70 ++---
 .../transport/server/ratis/DispatcherContext.java  | 33 --
 .../container/keyvalue/TestKeyValueHandler.java|  3 +
 .../rpc/TestContainerStateMachineFailures.java | 37 ++-
 .../transport/server/ratis/TestCSMMetrics.java |  5 ++
 .../container/server/TestContainerServer.java  |  5 ++
 .../server/TestSecureContainerServer.java  |  4 ++
 11 files changed, 239 insertions(+), 26 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 197bfad..3b78835 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -142,6 +142,7 @@ enum Result {
   UNKNOWN_BCSID = 37;
   BCSID_MISMATCH = 38;
   CONTAINER_NOT_OPEN = 39;
+  CONTAINER_MISSING = 40;
 }
 
 /**
@@ -245,6 +246,10 @@ message ContainerDataProto {
   optional ContainerType containerType = 10 [default = KeyValueContainer];
 }
 
+message ContainerIdSetProto {
+repeated int64 containerId = 1;
+}
+
 enum ContainerType {
   KeyValueContainer = 1;
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index aff2275..4a7a950 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -35,9 +35,11 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 import java.util.Map;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.stream.Collectors;
 
 
@@ -50,7 +52,8 @@ public class ContainerSet {
 
   private final ConcurrentSkipListMap containerMap = new
   ConcurrentSkipListMap<>();
-
+  private final ConcurrentSkipListSet missingContainerSet =
+  new ConcurrentSkipListSet<>();
   /**
* Add Container to container map.
* @param container
@@ -128,6 +131,7 @@ public class ContainerSet {
* @return containerMap Iterator
*/
   public Iterator> getContainerMapIterator() {
+containerMap.keySet().stream().collect(Collectors.toSet());
 return containerMap.entrySet().iterator();
   }
 
@@ -218,4 +222,20 @@ public class ContainerSet {
 return deletionPolicy
 .chooseContainerForBlockDeletion(count, containerDataMap);
   }
+
+  public Set getMissingContainerSet() {
+return missingContainerSet;
+  }
+
+  /**
+   * Builds the missing container set by taking a diff between the total
+   * number of containers actually found and the containers which actually
+   * got created. This will only be called during the initialization of
+   * Datanode Service when it is still not a part of any write Pipeline.
+   * @param createdContainerSet ContainerId set persisted in the Ratis snapshot
+   */
+  public void buildMissingContainerSet(Set createdContainerSet) {
+missingContainerSet.addAll(createdContainerSet);
+missingContainerSet.removeAll(containerMap.keySet());
+  }
 }
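
The set difference performed by buildMissingContainerSet above can be illustrated with the standalone sketch below (illustrative only, not the real ContainerSet): container ids recorded in the Ratis snapshot but absent from the on-disk container map are flagged as missing.

import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;

public class MissingContainerSketch {
  public static void main(String[] args) {
    Set<Long> createdPerSnapshot = new ConcurrentSkipListSet<>(); // from the Ratis snapshot
    createdPerSnapshot.add(1L);
    createdPerSnapshot.add(2L);
    createdPerSnapshot.add(3L);

    Set<Long> foundOnDisk = new ConcurrentSkipListSet<>();        // containerMap keys
    foundOnDisk.add(1L);
    foundOnDisk.add(3L);

    Set<Long> missing = new ConcurrentSkipListSet<>(createdPerSnapshot);
    missing.removeAll(foundOnDisk);
    System.out.println("missing containers: " + missing);         // prints [2]
  }
}
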
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 3653cb1..e7a6de3 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -66,6 

[hadoop] branch trunk updated: HDDS-1184. Parallelization of write chunks in datanodes is broken. Contributed by Shashikant Banerjee.

2019-03-05 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 62e89dc  HDDS-1184. Parallelization of write chunks in datanodes is 
broken. Contributed by Shashikant Banerjee.
62e89dc is described below

commit 62e89dc275f120f54967744393e2ddde15575096
Author: Shashikant Banerjee 
AuthorDate: Wed Mar 6 10:00:16 2019 +0530

HDDS-1184. Parallelization of write chunks in datanodes is broken. 
Contributed by Shashikant Banerjee.
---
 .../src/main/proto/DatanodeContainerProtocol.proto |  1 +
 .../server/ratis/ContainerStateMachine.java| 60 --
 2 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 3b78835..7396eb3 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -143,6 +143,7 @@ enum Result {
   BCSID_MISMATCH = 38;
   CONTAINER_NOT_OPEN = 39;
   CONTAINER_MISSING = 40;
+  BLOCK_TOKEN_VERIFICATION_FAILED = 41;
 }
 
 /**
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 759f957..ed7e099 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import io.opentracing.Scope;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
@@ -350,13 +352,20 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   }
 
   private ContainerCommandResponseProto dispatchCommand(
-  ContainerCommandRequestProto requestProto,
-  DispatcherContext context) throws IOException {
+  ContainerCommandRequestProto requestProto, DispatcherContext context) {
 LOG.trace("dispatch {}", requestProto);
-if(isBlockTokenEnabled) {
-  // ServerInterceptors intercepts incoming request and creates ugi.
-  tokenVerifier.verify(UserGroupInformation.getCurrentUser()
-  .getShortUserName(), requestProto.getEncodedToken());
+if (isBlockTokenEnabled) {
+  try {
+// ServerInterceptors intercepts incoming request and creates ugi.
+tokenVerifier
+.verify(UserGroupInformation.getCurrentUser().getShortUserName(),
+requestProto.getEncodedToken());
+  } catch (IOException ioe) {
+StorageContainerException sce = new StorageContainerException(
+"Block token verification failed. " + ioe.getMessage(), ioe,
+ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
+return ContainerUtils.logAndReturnError(LOG, sce, requestProto);
+  }
 }
 ContainerCommandResponseProto response =
 dispatcher.dispatch(requestProto, context);
@@ -365,7 +374,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   }
 
   private Message runCommand(ContainerCommandRequestProto requestProto,
-  DispatcherContext context) throws IOException {
+  DispatcherContext context) {
 return dispatchCommand(requestProto, context)::toByteString;
   }
 
@@ -394,14 +403,10 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
 .setCreateContainerSet(createContainerSet)
 .build();
-CompletableFuture writeChunkFuture;
-try {
-  Message msg = runCommand(requestProto, context);
-  writeChunkFuture = CompletableFuture
-  .supplyAsync(() -> msg, chunkExecutor);
-}catch(IOException ie) {
-  writeChunkFuture = completeExceptionally(ie);
-}
+// ensure the write chunk happens asynchronously in writeChunkExecutor pool
+// thread.
+CompletableFuture writeChunkFuture = CompletableFuture
+.supplyAsync(() -> runCommand(requestProto, context), chunkExecutor);
 
 writeChunkFutureMap.put(entryIndex, writeChunkFuture);
 LOG.debug("writeChunk writeStateMachineData : blockId " + 
write.getBlockID()
@@ -567,16 +572,18 @
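
The core of the change above is that the chunk write is now submitted to the chunk executor as a whole via supplyAsync, instead of running on the Ratis caller thread and only wrapping the result in a future. Below is a minimal standalone sketch of that pattern, with illustrative names rather than the real ContainerStateMachine types:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncWriteChunkSketch {
  private static String runCommand(String request) {
    // Placeholder for dispatcher.dispatch(requestProto, context).
    return "wrote " + request + " on " + Thread.currentThread().getName();
  }

  public static void main(String[] args) throws Exception {
    ExecutorService chunkExecutor = Executors.newFixedThreadPool(4);

    // Before: runCommand(request) ran on the caller thread and only the result
    // was wrapped in a future. After: the command itself runs on chunkExecutor.
    CompletableFuture<String> writeChunkFuture =
        CompletableFuture.supplyAsync(() -> runCommand("chunk-1"), chunkExecutor);

    System.out.println(writeChunkFuture.get());
    chunkExecutor.shutdown();
  }
}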

[hadoop] branch ozone-0.4 updated: HDDS-935. Avoid creating an already created container on a datanode in case of disk removal followed by datanode restart. Contributed by Shashikant Banerjee.

2019-03-07 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new b99ae7b  HDDS-935. Avoid creating an already created container on a 
datanode in case of disk removal followed by datanode restart. Contributed by 
Shashikant Banerjee.
b99ae7b is described below

commit b99ae7b4e2f32e9af618ffad9ae468bcfc5de0b2
Author: Shashikant Banerjee 
AuthorDate: Tue Mar 5 22:09:25 2019 +0530

HDDS-935. Avoid creating an already created container on a datanode in case 
of disk removal followed by datanode restart. Contributed by Shashikant 
Banerjee.

(cherry picked from commit 5fcea54a50753a37b61f0e3c6be5a0236f95861a)
---
 .../src/main/proto/DatanodeContainerProtocol.proto |  5 ++
 .../ozone/container/common/impl/ContainerSet.java  | 22 ++-
 .../container/common/impl/HddsDispatcher.java  | 73 +++---
 .../common/interfaces/ContainerDispatcher.java |  8 +++
 .../server/ratis/ContainerStateMachine.java| 70 ++---
 .../transport/server/ratis/DispatcherContext.java  | 33 --
 .../container/keyvalue/TestKeyValueHandler.java|  3 +
 .../rpc/TestContainerStateMachineFailures.java | 37 ++-
 .../transport/server/ratis/TestCSMMetrics.java |  5 ++
 .../container/server/TestContainerServer.java  |  5 ++
 .../server/TestSecureContainerServer.java  |  4 ++
 11 files changed, 239 insertions(+), 26 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 197bfad..3b78835 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -142,6 +142,7 @@ enum Result {
   UNKNOWN_BCSID = 37;
   BCSID_MISMATCH = 38;
   CONTAINER_NOT_OPEN = 39;
+  CONTAINER_MISSING = 40;
 }
 
 /**
@@ -245,6 +246,10 @@ message ContainerDataProto {
   optional ContainerType containerType = 10 [default = KeyValueContainer];
 }
 
+message ContainerIdSetProto {
+repeated int64 containerId = 1;
+}
+
 enum ContainerType {
   KeyValueContainer = 1;
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index aff2275..4a7a950 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -35,9 +35,11 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 import java.util.Map;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.stream.Collectors;
 
 
@@ -50,7 +52,8 @@ public class ContainerSet {
 
   private final ConcurrentSkipListMap containerMap = new
   ConcurrentSkipListMap<>();
-
+  private final ConcurrentSkipListSet missingContainerSet =
+  new ConcurrentSkipListSet<>();
   /**
* Add Container to container map.
* @param container
@@ -128,6 +131,7 @@ public class ContainerSet {
* @return containerMap Iterator
*/
   public Iterator> getContainerMapIterator() {
+containerMap.keySet().stream().collect(Collectors.toSet());
 return containerMap.entrySet().iterator();
   }
 
@@ -218,4 +222,20 @@ public class ContainerSet {
 return deletionPolicy
 .chooseContainerForBlockDeletion(count, containerDataMap);
   }
+
+  public Set getMissingContainerSet() {
+return missingContainerSet;
+  }
+
+  /**
+   * Builds the missing container set by taking a diff between the total
+   * number of containers actually found and the containers which actually
+   * got created. This will only be called during the initialization of
+   * Datanode Service when it is still not a part of any write Pipeline.
+   * @param createdContainerSet ContainerId set persisted in the Ratis snapshot
+   */
+  public void buildMissingContainerSet(Set createdContainerSet) {
+missingContainerSet.addAll(createdContainerSet);
+missingContainerSet.removeAll(containerMap.keySet());
+  }
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 3653cb1..e7a6de3 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/sr

[hadoop] branch ozone-0.4 updated: HDDS-1184. Parallelization of write chunks in datanodes is broken. Contributed by Shashikant Banerjee.

2019-03-07 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 8655abb  HDDS-1184. Parallelization of write chunks in datanodes is 
broken. Contributed by Shashikant Banerjee.
8655abb is described below

commit 8655abb353811eba349dd0703d702deb02532242
Author: Shashikant Banerjee 
AuthorDate: Wed Mar 6 10:00:16 2019 +0530

HDDS-1184. Parallelization of write chunks in datanodes is broken. 
Contributed by Shashikant Banerjee.

(cherry picked from commit 62e89dc275f120f54967744393e2ddde15575096)
---
 .../src/main/proto/DatanodeContainerProtocol.proto |  1 +
 .../server/ratis/ContainerStateMachine.java| 60 --
 2 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 3b78835..7396eb3 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -143,6 +143,7 @@ enum Result {
   BCSID_MISMATCH = 38;
   CONTAINER_NOT_OPEN = 39;
   CONTAINER_MISSING = 40;
+  BLOCK_TOKEN_VERIFICATION_FAILED = 41;
 }
 
 /**
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 759f957..ed7e099 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import io.opentracing.Scope;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
@@ -350,13 +352,20 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   }
 
   private ContainerCommandResponseProto dispatchCommand(
-  ContainerCommandRequestProto requestProto,
-  DispatcherContext context) throws IOException {
+  ContainerCommandRequestProto requestProto, DispatcherContext context) {
 LOG.trace("dispatch {}", requestProto);
-if(isBlockTokenEnabled) {
-  // ServerInterceptors intercepts incoming request and creates ugi.
-  tokenVerifier.verify(UserGroupInformation.getCurrentUser()
-  .getShortUserName(), requestProto.getEncodedToken());
+if (isBlockTokenEnabled) {
+  try {
+// ServerInterceptors intercepts incoming request and creates ugi.
+tokenVerifier
+.verify(UserGroupInformation.getCurrentUser().getShortUserName(),
+requestProto.getEncodedToken());
+  } catch (IOException ioe) {
+StorageContainerException sce = new StorageContainerException(
+"Block token verification failed. " + ioe.getMessage(), ioe,
+ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED);
+return ContainerUtils.logAndReturnError(LOG, sce, requestProto);
+  }
 }
 ContainerCommandResponseProto response =
 dispatcher.dispatch(requestProto, context);
@@ -365,7 +374,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   }
 
   private Message runCommand(ContainerCommandRequestProto requestProto,
-  DispatcherContext context) throws IOException {
+  DispatcherContext context) {
 return dispatchCommand(requestProto, context)::toByteString;
   }
 
@@ -394,14 +403,10 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
 .setCreateContainerSet(createContainerSet)
 .build();
-CompletableFuture writeChunkFuture;
-try {
-  Message msg = runCommand(requestProto, context);
-  writeChunkFuture = CompletableFuture
-  .supplyAsync(() -> msg, chunkExecutor);
-}catch(IOException ie) {
-  writeChunkFuture = completeExceptionally(ie);
-}
+// ensure the write chunk happens asynchronously in writeChunkExecutor pool
+// thread.
+CompletableFuture writeChunkFuture = CompletableFuture
+.supplyAsync(() -> runCommand(requestProto, context), chunkExecutor);
 
 writeChunkFutureMap.put(entryIndex, writeChunkFuture);
 LOG.debug("w

[hadoop] branch trunk updated: HDDS-1173. Fix a data corruption bug in BlockOutputStream. Contributed by Shashikant Banerjee.

2019-03-11 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b4aa24d  HDDS-1173. Fix a data corruption bug in BlockOutputStream. 
Contributed by Shashikant Banerjee.
b4aa24d is described below

commit b4aa24d3c5ad1b9309a58795e4b48e567695c4e4
Author: Shashikant Banerjee 
AuthorDate: Mon Mar 11 23:15:49 2019 +0530

HDDS-1173. Fix a data corruption bug in BlockOutputStream. Contributed by 
Shashikant Banerjee.
---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 232 +
 .../apache/hadoop/hdds/scm/storage/BufferPool.java | 106 ++
 .../ozone/client/io/BlockOutputStreamEntry.java|  23 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java|  21 +-
 .../rpc/TestCloseContainerHandlingByClient.java|  62 --
 .../commandhandler/TestBlockDeletion.java  |   2 +-
 6 files changed, 279 insertions(+), 167 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 2e156b3..fe41f57 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -40,7 +40,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.nio.Buffer;
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.UUID;
@@ -87,7 +86,7 @@ public class BlockOutputStream extends OutputStream {
   private final long streamBufferFlushSize;
   private final long streamBufferMaxSize;
   private final long watchTimeout;
-  private List bufferList;
+  private BufferPool bufferPool;
   // The IOException will be set by response handling thread in case there is 
an
   // exception received in the response. If the exception is set, the next
   // request will fail upfront.
@@ -111,8 +110,6 @@ public class BlockOutputStream extends OutputStream {
   // map containing mapping for putBlock logIndex to to flushedDataLength Map.
   private ConcurrentHashMap commitIndex2flushedDataMap;
 
-  private int currentBufferIndex;
-
   private List failedServers;
 
   /**
@@ -124,7 +121,7 @@ public class BlockOutputStream extends OutputStream {
* @param pipeline pipeline where block will be written
* @param traceID  container protocol call args
* @param chunkSizechunk size
-   * @param bufferList   list of byte buffers
+   * @param bufferPool   pool of buffers
* @param streamBufferFlushSize flush size
* @param streamBufferMaxSize   max size of the currentBuffer
* @param watchTimeout  watch timeout
@@ -135,7 +132,7 @@ public class BlockOutputStream extends OutputStream {
   public BlockOutputStream(BlockID blockID, String key,
   XceiverClientManager xceiverClientManager, Pipeline pipeline,
   String traceID, int chunkSize, long streamBufferFlushSize,
-  long streamBufferMaxSize, long watchTimeout, List bufferList,
+  long streamBufferMaxSize, long watchTimeout, BufferPool bufferPool,
   ChecksumType checksumType, int bytesPerChecksum)
   throws IOException {
 this.blockID = blockID;
@@ -154,7 +151,7 @@ public class BlockOutputStream extends OutputStream {
 this.streamBufferFlushSize = streamBufferFlushSize;
 this.streamBufferMaxSize = streamBufferMaxSize;
 this.watchTimeout = watchTimeout;
-this.bufferList = bufferList;
+this.bufferPool = bufferPool;
 this.checksumType = checksumType;
 this.bytesPerChecksum = bytesPerChecksum;
 
@@ -164,7 +161,6 @@ public class BlockOutputStream extends OutputStream {
 totalAckDataLength = 0;
 futureMap = new ConcurrentHashMap<>();
 totalDataFlushedLength = 0;
-currentBufferIndex = 0;
 writtenDataLength = 0;
 failedServers = Collections.emptyList();
   }
@@ -181,13 +177,6 @@ public class BlockOutputStream extends OutputStream {
 return writtenDataLength;
   }
 
-  private long computeBufferData() {
-int dataLength =
-bufferList.stream().mapToInt(Buffer::position).sum();
-Preconditions.checkState(dataLength <= streamBufferMaxSize);
-return dataLength;
-  }
-
   public List getFailedServers() {
 return failedServers;
   }
@@ -202,6 +191,7 @@ public class BlockOutputStream extends OutputStream {
 
   @Override
   public void write(byte[] b, int off, int len) throws IOException {
+checkOpen();
 if (b == null) {
   throw new NullPointerException();
 }
@@ -213,53 +203,40 @@ public class BlockOutputStream extends OutputStream {
   return;
 }
 while (len > 0) {
-  checkOpen();
   int writeLen;
-  al

[hadoop] branch ozone-0.4 updated: HDDS-1173. Fix a data corruption bug in BlockOutputStream. Contributed by Shashikant Banerjee.

2019-03-11 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 4373c12  HDDS-1173. Fix a data corruption bug in BlockOutputStream. 
Contributed by Shashikant Banerjee.
4373c12 is described below

commit 4373c127013c114ffec7f99262f2565009fa46b8
Author: Shashikant Banerjee 
AuthorDate: Mon Mar 11 23:15:49 2019 +0530

HDDS-1173. Fix a data corruption bug in BlockOutputStream. Contributed by 
Shashikant Banerjee.

(cherry picked from commit b4aa24d3c5ad1b9309a58795e4b48e567695c4e4)
---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 232 +
 .../apache/hadoop/hdds/scm/storage/BufferPool.java | 106 ++
 .../ozone/client/io/BlockOutputStreamEntry.java|  23 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java|  21 +-
 .../rpc/TestCloseContainerHandlingByClient.java|  62 --
 .../commandhandler/TestBlockDeletion.java  |   2 +-
 6 files changed, 279 insertions(+), 167 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 2e156b3..fe41f57 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -40,7 +40,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.nio.Buffer;
 import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.UUID;
@@ -87,7 +86,7 @@ public class BlockOutputStream extends OutputStream {
   private final long streamBufferFlushSize;
   private final long streamBufferMaxSize;
   private final long watchTimeout;
-  private List bufferList;
+  private BufferPool bufferPool;
   // The IOException will be set by response handling thread in case there is 
an
   // exception received in the response. If the exception is set, the next
   // request will fail upfront.
@@ -111,8 +110,6 @@ public class BlockOutputStream extends OutputStream {
   // map containing mapping for putBlock logIndex to to flushedDataLength Map.
   private ConcurrentHashMap commitIndex2flushedDataMap;
 
-  private int currentBufferIndex;
-
   private List failedServers;
 
   /**
@@ -124,7 +121,7 @@ public class BlockOutputStream extends OutputStream {
* @param pipeline pipeline where block will be written
* @param traceID  container protocol call args
* @param chunkSizechunk size
-   * @param bufferList   list of byte buffers
+   * @param bufferPool   pool of buffers
* @param streamBufferFlushSize flush size
* @param streamBufferMaxSize   max size of the currentBuffer
* @param watchTimeout  watch timeout
@@ -135,7 +132,7 @@ public class BlockOutputStream extends OutputStream {
   public BlockOutputStream(BlockID blockID, String key,
   XceiverClientManager xceiverClientManager, Pipeline pipeline,
   String traceID, int chunkSize, long streamBufferFlushSize,
-  long streamBufferMaxSize, long watchTimeout, List bufferList,
+  long streamBufferMaxSize, long watchTimeout, BufferPool bufferPool,
   ChecksumType checksumType, int bytesPerChecksum)
   throws IOException {
 this.blockID = blockID;
@@ -154,7 +151,7 @@ public class BlockOutputStream extends OutputStream {
 this.streamBufferFlushSize = streamBufferFlushSize;
 this.streamBufferMaxSize = streamBufferMaxSize;
 this.watchTimeout = watchTimeout;
-this.bufferList = bufferList;
+this.bufferPool = bufferPool;
 this.checksumType = checksumType;
 this.bytesPerChecksum = bytesPerChecksum;
 
@@ -164,7 +161,6 @@ public class BlockOutputStream extends OutputStream {
 totalAckDataLength = 0;
 futureMap = new ConcurrentHashMap<>();
 totalDataFlushedLength = 0;
-currentBufferIndex = 0;
 writtenDataLength = 0;
 failedServers = Collections.emptyList();
   }
@@ -181,13 +177,6 @@ public class BlockOutputStream extends OutputStream {
 return writtenDataLength;
   }
 
-  private long computeBufferData() {
-int dataLength =
-bufferList.stream().mapToInt(Buffer::position).sum();
-Preconditions.checkState(dataLength <= streamBufferMaxSize);
-return dataLength;
-  }
-
   public List getFailedServers() {
 return failedServers;
   }
@@ -202,6 +191,7 @@ public class BlockOutputStream extends OutputStream {
 
   @Override
   public void write(byte[] b, int off, int len) throws IOException {
+checkOpen();
 if (b == null) {
   throw new NullPointerException();
 }
@@ -213,53 +203,40 @@ public class BlockOutputStream extends OutputStream {

[hadoop] branch trunk updated: HDDS-1257. Incorrect object because of mismatch in block lengths. Contributed by Shashikant Banerjee.

2019-03-14 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d60673c  HDDS-1257. Incorrect object because of mismatch in block 
lengths. Contributed by Shashikant Banerjee.
d60673c is described below

commit d60673c47077d69320ae1bd37c6b74489bef25f7
Author: Shashikant Banerjee 
AuthorDate: Thu Mar 14 19:32:36 2019 +0530

HDDS-1257. Incorrect object because of mismatch in block lengths. 
Contributed by Shashikant Banerjee.
---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 38 ++
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index fe41f57..13913ee 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -46,6 +46,7 @@ import java.util.UUID;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.concurrent.*;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
 .putBlockAsync;
@@ -108,7 +109,10 @@ public class BlockOutputStream extends OutputStream {
   CompletableFuture>
   futureMap;
   // map containing mapping for putBlock logIndex to to flushedDataLength Map.
-  private ConcurrentHashMap commitIndex2flushedDataMap;
+
+  // The map should maintain the keys (logIndexes) in order so that while
+  // removing we always end up updating incremented data flushed length.
+  private ConcurrentSkipListMap commitIndex2flushedDataMap;
 
   private List failedServers;
 
@@ -157,7 +161,7 @@ public class BlockOutputStream extends OutputStream {
 
 // A single thread executor handle the responses of async requests
 responseExecutor = Executors.newSingleThreadExecutor();
-commitIndex2flushedDataMap = new ConcurrentHashMap<>();
+commitIndex2flushedDataMap = new ConcurrentSkipListMap<>();
 totalAckDataLength = 0;
 futureMap = new ConcurrentHashMap<>();
 totalDataFlushedLength = 0;
@@ -206,7 +210,7 @@ public class BlockOutputStream extends OutputStream {
   int writeLen;
 
   // Allocate a buffer if needed. The buffer will be allocated only
-  // once as needed and will be reused again for mutiple blockOutputStream
+  // once as needed and will be reused again for multiple blockOutputStream
   // entries.
   ByteBuffer  currentBuffer = bufferPool.allocateBufferIfNeeded();
   int pos = currentBuffer.position();
@@ -281,10 +285,18 @@ public class BlockOutputStream extends OutputStream {
* just update the totalAckDataLength. In case of failure,
* we will read the data starting from totalAckDataLength.
*/
-  private void updateFlushIndex(long index) {
-if (!commitIndex2flushedDataMap.isEmpty()) {
+  private void updateFlushIndex(List indexes) {
+Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty());
+for (long index : indexes) {
   Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index));
-  totalAckDataLength = commitIndex2flushedDataMap.remove(index);
+  long length = commitIndex2flushedDataMap.remove(index);
+
+  // totalAckDataLength replicated so far should always be less than or equal
+  // to the current length being returned from commitIndex2flushedDataMap.
+  // The below precondition would ensure commitIndex2flushedDataMap entries
+  // are removed in order of the insertion to the map.
+  Preconditions.checkArgument(totalAckDataLength < length);
+  totalAckDataLength = length;
   LOG.debug("Total data successfully replicated: " + totalAckDataLength);
   futureMap.remove(totalAckDataLength);
   // Flush has been committed to required servers successful.
@@ -325,13 +337,13 @@ public class BlockOutputStream extends OutputStream {
   }
 
   private void adjustBuffers(long commitIndex) {
-commitIndex2flushedDataMap.keySet().stream().forEach(index -> {
-  if (index <= commitIndex) {
-updateFlushIndex(index);
-  } else {
-return;
-  }
-});
+List keyList = commitIndex2flushedDataMap.keySet().stream()
+.filter(p -> p <= commitIndex).collect(Collectors.toList());
+if (keyList.isEmpty()) {
+  return;
+} else {
+  updateFlushIndex(keyList);
+}
   }
 
   // It may happen that once the exception is encountered , we still might
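
The reasoning behind switching to a ConcurrentSkipListMap above is that acknowledged commit indexes must be consumed in ascending order so the acknowledged data length only ever grows. The small standalone sketch below illustrates that invariant; names are illustrative, not the real BlockOutputStream:

import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.stream.Collectors;

public class CommitIndexSketch {
  private final ConcurrentSkipListMap<Long, Long> commitIndex2flushedDataMap =
      new ConcurrentSkipListMap<>();
  private long totalAckDataLength = 0;

  void recordFlush(long logIndex, long flushedLength) {
    commitIndex2flushedDataMap.put(logIndex, flushedLength);
  }

  // Mirrors adjustBuffers: release everything acknowledged up to commitIndex.
  void onCommit(long commitIndex) {
    List<Long> acked = commitIndex2flushedDataMap.keySet().stream()
        .filter(i -> i <= commitIndex).collect(Collectors.toList());
    for (long index : acked) {
      long length = commitIndex2flushedDataMap.remove(index);
      // Sorted iteration guarantees lengths arrive in increasing order.
      assert totalAckDataLength < length;
      totalAckDataLength = length;
    }
  }

  public static void main(String[] args) {
    CommitIndexSketch s = new CommitIndexSketch();
    s.recordFlush(7, 4096);
    s.recordFlush(9, 8192);
    s.onCommit(9);
    System.out.println("acked bytes: " + s.totalAckDataLength);  // 8192
  }
}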


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1257. Incorrect object because of mismatch in block lengths. Contributed by Shashikant Banerjee.

2019-03-14 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 8614da3  HDDS-1257. Incorrect object because of mismatch in block 
lengths. Contributed by Shashikant Banerjee.
8614da3 is described below

commit 8614da3723f3470a13043fc8ffd87d4c27b052de
Author: Shashikant Banerjee 
AuthorDate: Thu Mar 14 19:32:36 2019 +0530

HDDS-1257. Incorrect object because of mismatch in block lengths. 
Contributed by Shashikant Banerjee.

(cherry picked from commit d60673c47077d69320ae1bd37c6b74489bef25f7)
---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 38 ++
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index fe41f57..13913ee 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -46,6 +46,7 @@ import java.util.UUID;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.concurrent.*;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
 .putBlockAsync;
@@ -108,7 +109,10 @@ public class BlockOutputStream extends OutputStream {
   CompletableFuture>
   futureMap;
   // map containing mapping for putBlock logIndex to to flushedDataLength Map.
-  private ConcurrentHashMap commitIndex2flushedDataMap;
+
+  // The map should maintain the keys (logIndexes) in order so that while
+  // removing we always end up updating incremented data flushed length.
+  private ConcurrentSkipListMap<Long, Long> commitIndex2flushedDataMap;
 
   private List failedServers;
 
@@ -157,7 +161,7 @@ public class BlockOutputStream extends OutputStream {
 
 // A single thread executor handle the responses of async requests
 responseExecutor = Executors.newSingleThreadExecutor();
-commitIndex2flushedDataMap = new ConcurrentHashMap<>();
+commitIndex2flushedDataMap = new ConcurrentSkipListMap<>();
 totalAckDataLength = 0;
 futureMap = new ConcurrentHashMap<>();
 totalDataFlushedLength = 0;
@@ -206,7 +210,7 @@ public class BlockOutputStream extends OutputStream {
   int writeLen;
 
   // Allocate a buffer if needed. The buffer will be allocated only
-  // once as needed and will be reused again for mutiple blockOutputStream
+  // once as needed and will be reused again for multiple blockOutputStream
   // entries.
   ByteBuffer  currentBuffer = bufferPool.allocateBufferIfNeeded();
   int pos = currentBuffer.position();
@@ -281,10 +285,18 @@ public class BlockOutputStream extends OutputStream {
* just update the totalAckDataLength. In case of failure,
* we will read the data starting from totalAckDataLength.
*/
-  private void updateFlushIndex(long index) {
-if (!commitIndex2flushedDataMap.isEmpty()) {
+  private void updateFlushIndex(List indexes) {
+Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty());
+for (long index : indexes) {
   Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index));
-  totalAckDataLength = commitIndex2flushedDataMap.remove(index);
+  long length = commitIndex2flushedDataMap.remove(index);
+
+  // totalAckDataLength replicated yet should always be less than equal to
+  // the current length being returned from commitIndex2flushedDataMap.
+  // The below precondition would ensure commitIndex2flushedDataMap entries
+  // are removed in order of the insertion to the map.
+  Preconditions.checkArgument(totalAckDataLength < length);
+  totalAckDataLength = length;
   LOG.debug("Total data successfully replicated: " + totalAckDataLength);
   futureMap.remove(totalAckDataLength);
   // Flush has been committed to required servers successful.
@@ -325,13 +337,13 @@ public class BlockOutputStream extends OutputStream {
   }
 
   private void adjustBuffers(long commitIndex) {
-commitIndex2flushedDataMap.keySet().stream().forEach(index -> {
-  if (index <= commitIndex) {
-updateFlushIndex(index);
-  } else {
-return;
-  }
-});
+List keyList = commitIndex2flushedDataMap.keySet().stream()
+.filter(p -> p <= commitIndex).collect(Collectors.toList());
+if (keyList.isEmpty()) {
+  return;
+} else {
+  updateFlushIndex(keyList);
+}
   }
 
   // It may happen that once the exception is encountered , we still might
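
The ordering guarantee spelled out in the comments above is the crux of the fix: a ConcurrentSkipListMap hands back its keys in ascending commit-index order, so the acknowledged data length can only move forward. Below is a minimal, self-contained sketch of the same bookkeeping in plain Java (illustrative only, with made-up names; it is not the actual BlockOutputStream code):

import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.stream.Collectors;

/** Toy model of the commitIndex -> flushedDataLength bookkeeping. */
class AckTracker {
  // Sorted map: iterating keySet() yields commit indexes in ascending order.
  private final ConcurrentSkipListMap<Long, Long> indexToLength =
      new ConcurrentSkipListMap<>();
  private long totalAckedLength = 0;

  /** Remember how much data had been flushed when this commit index was issued. */
  void recordFlush(long commitIndex, long flushedLength) {
    indexToLength.put(commitIndex, flushedLength);
  }

  /** Called once the pipeline has acknowledged everything up to commitIndex. */
  void onCommit(long commitIndex) {
    List<Long> acked = indexToLength.keySet().stream()
        .filter(i -> i <= commitIndex)
        .collect(Collectors.toList());
    for (long index : acked) {
      long length = indexToLength.remove(index);
      // Ascending removal order means the acknowledged length never goes backwards.
      assert totalAckedLength < length;
      totalAckedLength = length;
    }
  }

  long getTotalAckedLength() {
    return totalAckedLength;
  }
}

With a plain ConcurrentHashMap the iteration order over keySet() is unspecified, so entries could be removed out of order and the acknowledged length could jump backwards; switching to a sorted map removes that hazard.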



[hadoop] branch trunk updated: HDDS-1098. Introduce Retry Policy in Ozone Client. Contributed by Shashikant Banerjee.

2019-03-15 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 155ab6d  HDDS-1098. Introduce Retry Policy in Ozone Client. 
Contributed by Shashikant Banerjee.
155ab6d is described below

commit 155ab6d5d8ab4a80019e65351572320502d8a510
Author: Shashikant Banerjee 
AuthorDate: Fri Mar 15 15:40:59 2019 +0530

HDDS-1098. Introduce Retry Policy in Ozone Client. Contributed by 
Shashikant Banerjee.
---
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  19 ++-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   4 +-
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   5 +
 .../main/java/org/apache/ratis/RatisHelper.java|  33 -
 .../common/src/main/resources/ozone-default.xml|  16 ++-
 .../transport/server/ratis/XceiverServerRatis.java |  18 +--
 .../TestCloseContainerCommandHandler.java  |   8 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java  |  10 +-
 .../hadoop/ozone/client/OzoneClientUtils.java  |  11 ++
 .../hadoop/ozone/client/io/KeyOutputStream.java| 149 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  22 +--
 .../org/apache/hadoop/ozone/RatisTestHelper.java   |   5 +-
 .../web/storage/DistributedStorageHandler.java |  11 +-
 13 files changed, 206 insertions(+), 105 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 673a82b..65241bf 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -47,6 +47,7 @@ import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -74,6 +75,8 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 final String rpcType = ozoneConf
 .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
 ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+final TimeDuration clientRequestTimeout =
+RatisHelper.getClientRequestTimeout(ozoneConf);
 final int maxOutstandingRequests =
 HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
 final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
@@ -81,7 +84,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 SecurityConfig(ozoneConf));
 return new XceiverClientRatis(pipeline,
 SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
-retryPolicy, tlsConfig);
+retryPolicy, tlsConfig, clientRequestTimeout);
   }
 
   private final Pipeline pipeline;
@@ -90,6 +93,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   private final int maxOutstandingRequests;
   private final RetryPolicy retryPolicy;
   private final GrpcTlsConfig tlsConfig;
+  private final TimeDuration clientRequestTimeout;
 
   // Map to track commit index at every server
   private final ConcurrentHashMap commitInfoMap;
@@ -102,7 +106,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
*/
   private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
   int maxOutStandingChunks, RetryPolicy retryPolicy,
-  GrpcTlsConfig tlsConfig) {
+  GrpcTlsConfig tlsConfig, TimeDuration timeout) {
 super();
 this.pipeline = pipeline;
 this.rpcType = rpcType;
@@ -111,6 +115,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 commitInfoMap = new ConcurrentHashMap<>();
 watchClient = null;
 this.tlsConfig = tlsConfig;
+this.clientRequestTimeout = timeout;
   }
 
   private void updateCommitInfosMap(
@@ -160,7 +165,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 // requests to be handled by raft client
 if (!client.compareAndSet(null,
 RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-maxOutstandingRequests, tlsConfig))) {
+maxOutstandingRequests, tlsConfig, clientRequestTimeout))) {
   throw new IllegalStateException("Client is already connected.");
 }
   }
@@ -243,7 +248,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 if (watchClient == null) {
   watchClient =
   RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-  maxOutstandingRequests, tlsConfig);
+  maxOutstandingRequests, tlsConfig, clientRequestTimeout);
 }
 CompletableFuture replyFutu
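
Both the retry policy and the new client request timeout are derived from configuration before the RaftClient is built. A hedged sketch of how such values can be read and turned into Ratis objects follows; the class, property names and defaults are placeholders for this illustration only, not the ones introduced by the patch:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.ratis.retry.RetryPolicies;
import org.apache.ratis.retry.RetryPolicy;
import org.apache.ratis.util.TimeDuration;

final class RetryConfigSketch {
  // Hypothetical property names, used only in this illustration.
  static final String MAX_RETRIES_KEY = "example.ratis.client.request.max.retries";
  static final String RETRY_INTERVAL_MS_KEY = "example.ratis.client.request.retry.interval.ms";
  static final String REQUEST_TIMEOUT_MS_KEY = "example.ratis.client.request.timeout.ms";

  private RetryConfigSketch() { }

  static RetryPolicy createRetryPolicy(Configuration conf) {
    int maxRetries = conf.getInt(MAX_RETRIES_KEY, 10);
    long intervalMs = conf.getLong(RETRY_INTERVAL_MS_KEY, 1000L);
    // Retry a bounded number of times, sleeping a fixed interval in between.
    return RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        maxRetries, TimeDuration.valueOf(intervalMs, TimeUnit.MILLISECONDS));
  }

  static TimeDuration getClientRequestTimeout(Configuration conf) {
    // Bounds how long a single Raft client request may stay outstanding.
    return TimeDuration.valueOf(
        conf.getLong(REQUEST_TIMEOUT_MS_KEY, 3000L), TimeUnit.MILLISECONDS);
  }
}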

[hadoop] branch ozone-0.4 updated: HDDS-1098. Introduce Retry Policy in Ozone Client. Contributed by Shashikant Banerjee.

2019-03-15 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 8bd13ff  HDDS-1098. Introduce Retry Policy in Ozone Client. 
Contributed by Shashikant Banerjee.
8bd13ff is described below

commit 8bd13ffa1012eb66e7cbb820af410e68882a5a95
Author: Shashikant Banerjee 
AuthorDate: Fri Mar 15 15:40:59 2019 +0530

HDDS-1098. Introduce Retry Policy in Ozone Client. Contributed by 
Shashikant Banerjee.

(cherry picked from commit 155ab6d5d8ab4a80019e65351572320502d8a510)
---
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  19 ++-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   4 +-
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   5 +
 .../main/java/org/apache/ratis/RatisHelper.java|  33 -
 .../common/src/main/resources/ozone-default.xml|  16 ++-
 .../transport/server/ratis/XceiverServerRatis.java |  18 +--
 .../TestCloseContainerCommandHandler.java  |   8 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java  |  10 +-
 .../hadoop/ozone/client/OzoneClientUtils.java  |  11 ++
 .../hadoop/ozone/client/io/KeyOutputStream.java| 149 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  22 +--
 .../org/apache/hadoop/ozone/RatisTestHelper.java   |   5 +-
 .../web/storage/DistributedStorageHandler.java |  11 +-
 13 files changed, 206 insertions(+), 105 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 673a82b..65241bf 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -47,6 +47,7 @@ import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -74,6 +75,8 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 final String rpcType = ozoneConf
 .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
 ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+final TimeDuration clientRequestTimeout =
+RatisHelper.getClientRequestTimeout(ozoneConf);
 final int maxOutstandingRequests =
 HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
 final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
@@ -81,7 +84,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 SecurityConfig(ozoneConf));
 return new XceiverClientRatis(pipeline,
 SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
-retryPolicy, tlsConfig);
+retryPolicy, tlsConfig, clientRequestTimeout);
   }
 
   private final Pipeline pipeline;
@@ -90,6 +93,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   private final int maxOutstandingRequests;
   private final RetryPolicy retryPolicy;
   private final GrpcTlsConfig tlsConfig;
+  private final TimeDuration clientRequestTimeout;
 
   // Map to track commit index at every server
   private final ConcurrentHashMap commitInfoMap;
@@ -102,7 +106,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
*/
   private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
   int maxOutStandingChunks, RetryPolicy retryPolicy,
-  GrpcTlsConfig tlsConfig) {
+  GrpcTlsConfig tlsConfig, TimeDuration timeout) {
 super();
 this.pipeline = pipeline;
 this.rpcType = rpcType;
@@ -111,6 +115,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 commitInfoMap = new ConcurrentHashMap<>();
 watchClient = null;
 this.tlsConfig = tlsConfig;
+this.clientRequestTimeout = timeout;
   }
 
   private void updateCommitInfosMap(
@@ -160,7 +165,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 // requests to be handled by raft client
 if (!client.compareAndSet(null,
 RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-maxOutstandingRequests, tlsConfig))) {
+maxOutstandingRequests, tlsConfig, clientRequestTimeout))) {
   throw new IllegalStateException("Client is already connected.");
 }
   }
@@ -243,7 +248,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 if (watchClient == null) {
   watchClient =
   RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-  maxOutstandingRequests, tlsConfig);
+  maxOutstandingReq

[hadoop] branch trunk updated: HDDS-1088. Add blockade Tests to test Replica Manager. Contributed by Nilotpal Nandi.

2019-03-15 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a7f5e74  HDDS-1088. Add blockade Tests to test Replica Manager. 
Contributed by Nilotpal Nandi.
a7f5e74 is described below

commit a7f5e742a6e2ce5e12d127fa71c6280f05ce54d3
Author: Shashikant Banerjee 
AuthorDate: Fri Mar 15 20:54:41 2019 +0530

HDDS-1088. Add blockade Tests to test Replica Manager. Contributed by 
Nilotpal Nandi.
---
 hadoop-ozone/dev-support/docker/Dockerfile |   3 +
 hadoop-ozone/dist/src/main/blockade/README.md  |  10 +
 .../main/blockade/clusterUtils/cluster_utils.py| 479 +++--
 hadoop-ozone/dist/src/main/blockade/conftest.py| 139 +++---
 .../blockade/test_blockade_datanode_isolation.py   | 153 ---
 .../main/blockade/test_blockade_scm_isolation.py   | 163 ---
 6 files changed, 535 insertions(+), 412 deletions(-)

diff --git a/hadoop-ozone/dev-support/docker/Dockerfile 
b/hadoop-ozone/dev-support/docker/Dockerfile
index a84367e..045e1f6 100644
--- a/hadoop-ozone/dev-support/docker/Dockerfile
+++ b/hadoop-ozone/dev-support/docker/Dockerfile
@@ -46,6 +46,9 @@ RUN mkdir -p /opt && \
 #Install docker-compose
 RUN pip install docker-compose
 
+#Install pytest==2.8.7
+RUN pip install pytest==2.8.7
+
 ENV PATH=$PATH:/opt/findbugs/bin
 
 RUN addgroup -g 1000 default && \
diff --git a/hadoop-ozone/dist/src/main/blockade/README.md 
b/hadoop-ozone/dist/src/main/blockade/README.md
index 9ece997..fb58205 100644
--- a/hadoop-ozone/dist/src/main/blockade/README.md
+++ b/hadoop-ozone/dist/src/main/blockade/README.md
@@ -41,4 +41,14 @@ cd $DIRECTORY_OF_OZONE
 python -m pytest -s  blockade/ --containerStatusSleep=
 
 e.g: python -m pytest -s  blockade/ --containerStatusSleep=720
+```
+
+By default, second phase of the tests will not be run.
+In order to run the second phase of the tests, you can run following
+command-lines:
+
+```
+cd $DIRECTORY_OF_OZONE
+python -m pytest -s  blockade/ --runSecondPhase=true
+
 ```
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py 
b/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py
index bf0b28f..baa3960 100644
--- a/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py
+++ b/hadoop-ozone/dist/src/main/blockade/clusterUtils/cluster_utils.py
@@ -21,280 +21,289 @@ import subprocess
 import logging
 import time
 import re
-import yaml
 import os
+import yaml
 
 
 logger = logging.getLogger(__name__)
 
 
 class ClusterUtils(object):
+  """
+  This class contains all cluster related operations.
+  """
 
-@classmethod
-def cluster_setup(cls, docker_compose_file, datanode_count):
-"""start a blockade cluster"""
-logger.info("compose file :%s", docker_compose_file)
-logger.info("number of DNs :%d", datanode_count)
-call(["docker-compose", "-f", docker_compose_file, "down"])
-call(["docker-compose", "-f", docker_compose_file, "up", "-d",
-  "--scale", "datanode=" + str(datanode_count)])
+  @classmethod
+  def cluster_setup(cls, docker_compose_file, datanode_count,
+destroy_existing_cluster=True):
+"""start a blockade cluster"""
+logger.info("compose file :%s", docker_compose_file)
+logger.info("number of DNs :%d", datanode_count)
+if destroy_existing_cluster:
+  call(["docker-compose", "-f", docker_compose_file, "down"])
+call(["docker-compose", "-f", docker_compose_file, "up", "-d",
+  "--scale", "datanode=" + str(datanode_count)])
 
-logger.info("Waiting 30s for cluster start up...")
-time.sleep(30)
-output = subprocess.check_output(["docker-compose", "-f",
-  docker_compose_file, "ps"])
-output_array = output.split("\n")[2:-1]
+logger.info("Waiting 30s for cluster start up...")
+time.sleep(30)
+output = subprocess.check_output(["docker-compose", "-f",
+  docker_compose_file, "ps"])
+output_array = output.split("\n")[2:-1]
 
-container_list = []
-for out in output_array:
-container = out.split(" ")[0]
-container_list.append(container)
-call(["blockade", "add", container])
-time.sleep(2)
+container_list = []
+for out in output_array:
+  containe

[hadoop] branch ozone-0.4 updated: HDDS-1317. KeyOutputStream#write throws ArrayIndexOutOfBoundsException when running RandomWrite MR examples. Contributed by Shashikant Banerjee.

2019-03-25 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new eed623a  HDDS-1317. KeyOutputStream#write throws 
ArrayIndexOutOfBoundsException when running RandomWrite MR examples. 
Contributed by Shashikant Banerjee.
eed623a is described below

commit eed623ad618d06784858f793d72ecc01126753ef
Author: Shashikant Banerjee 
AuthorDate: Mon Mar 25 15:41:20 2019 +0530

HDDS-1317. KeyOutputStream#write throws ArrayIndexOutOfBoundsException when 
running RandomWrite MR examples. Contributed by Shashikant Banerjee.
---
 .../hadoop/hdds/scm/XceiverClientMetrics.java  |  20 +
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  90 +--
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 199 --
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |  16 +-
 .../org/apache/hadoop/hdds/client/BlockID.java |   4 +-
 .../ozone/client/io/BlockOutputStreamEntry.java|   6 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java|  59 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   7 +
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  12 +-
 .../ozone/client/rpc/TestBlockOutputStream.java| 690 +
 .../rpc/TestBlockOutputStreamWithFailures.java | 546 
 .../rpc/TestCloseContainerHandlingByClient.java|  11 +-
 .../ozone/container/ContainerTestHelper.java   |  17 +
 .../commandhandler/TestBlockDeletion.java  |   2 +-
 14 files changed, 1543 insertions(+), 136 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
index a430400..6c40921 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
@@ -37,7 +38,9 @@ public class XceiverClientMetrics {
   .getSimpleName();
 
   private @Metric MutableCounterLong pendingOps;
+  private @Metric MutableCounterLong totalOps;
   private MutableCounterLong[] pendingOpsArray;
+  private MutableCounterLong[] opsArray;
   private MutableRate[] containerOpsLatency;
   private MetricsRegistry registry;
 
@@ -46,12 +49,17 @@ public class XceiverClientMetrics {
 this.registry = new MetricsRegistry(SOURCE_NAME);
 
 this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
+this.opsArray = new MutableCounterLong[numEnumEntries];
 this.containerOpsLatency = new MutableRate[numEnumEntries];
 for (int i = 0; i < numEnumEntries; i++) {
   pendingOpsArray[i] = registry.newCounter(
   "numPending" + ContainerProtos.Type.forNumber(i + 1),
   "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops",
   (long) 0);
+  opsArray[i] = registry
+  .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1),
+  "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops",
+  (long) 0);
 
   containerOpsLatency[i] = registry.newRate(
   ContainerProtos.Type.forNumber(i + 1) + "Latency",
@@ -68,6 +76,8 @@ public class XceiverClientMetrics {
 
   public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
 pendingOps.incr();
+totalOps.incr();
+opsArray[type.ordinal()].incr();
 pendingOpsArray[type.ordinal()].incr();
   }
 
@@ -85,6 +95,16 @@ public class XceiverClientMetrics {
 return pendingOpsArray[type.ordinal()].value();
   }
 
+  @VisibleForTesting
+  public long getTotalOpCount() {
+return totalOps.value();
+  }
+
+  @VisibleForTesting
+  public long getContainerOpCountMetrics(ContainerProtos.Type type) {
+return opsArray[type.ordinal()].value();
+  }
+
   public void unRegister() {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 ms.unregisterSource(SOURCE_NAME);
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 65241bf..a2e65e2 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.Hd
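
The new opsArray mirrors the existing pendingOpsArray: one monotonically increasing counter per ContainerProtos.Type plus a totalOps counter, so a test can assert exactly how many WriteChunk or PutBlock calls a client issued. The same idea in plain Java, detached from the Hadoop metrics2 machinery (illustrative sketch; the class name is made up):

import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.LongAdder;

/** Counts operations per enum type, plus a running total. */
class OpCounter<T extends Enum<T>> {
  private final Map<T, LongAdder> perType;
  private final LongAdder total = new LongAdder();

  OpCounter(Class<T> enumClass) {
    perType = new EnumMap<>(enumClass);
    for (T t : enumClass.getEnumConstants()) {
      perType.put(t, new LongAdder());
    }
  }

  void increment(T type) {
    total.increment();
    perType.get(type).increment();
  }

  long getTotalOpCount() {
    return total.sum();
  }

  long getOpCount(T type) {
    return perType.get(type).sum();
  }
}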

[hadoop] branch trunk updated: HDFS-14499. Misleading REM_QUOTA value with snapshot and trash feature enabled for a directory. Contributed by Shashikant Banerjee.

2019-07-12 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f9fab9f  HDFS-14499. Misleading REM_QUOTA value with snapshot and 
trash feature enabled for a directory. Contributed by Shashikant Banerjee.
f9fab9f is described below

commit f9fab9f22a53757f8081e8224e0d4b557fe6a0e2
Author: Shashikant Banerjee 
AuthorDate: Fri Jul 12 15:41:34 2019 +0530

HDFS-14499. Misleading REM_QUOTA value with snapshot and trash feature 
enabled for a directory. Contributed by Shashikant Banerjee.
---
 .../hdfs/server/namenode/INodeReference.java   | 17 +--
 .../TestGetContentSummaryWithSnapshot.java | 33 --
 2 files changed, 34 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 8655bb3..8de0ed6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -500,14 +500,15 @@ public abstract class INodeReference extends INode {
 
 @Override
 public final ContentSummaryComputationContext computeContentSummary(
-int snapshotId, ContentSummaryComputationContext summary) {
-  final int s = snapshotId < lastSnapshotId ? snapshotId : lastSnapshotId;
-  // only count storagespace for WithName
-  final QuotaCounts q = computeQuotaUsage(
-  summary.getBlockStoragePolicySuite(), getStoragePolicyID(), false, 
s);
-  summary.getCounts().addContent(Content.DISKSPACE, q.getStorageSpace());
-  summary.getCounts().addTypeSpaces(q.getTypeSpaces());
-  return summary;
+int snapshotId, ContentSummaryComputationContext summary)
+throws AccessControlException {
+  Preconditions.checkState(snapshotId == Snapshot.CURRENT_STATE_ID
+  || this.lastSnapshotId >= snapshotId);
+  final INode referred =
+  this.getReferredINode().asReference().getReferredINode();
+  int id = snapshotId != Snapshot.CURRENT_STATE_ID ? snapshotId :
+  this.lastSnapshotId;
+  return referred.computeContentSummary(id, summary);
 }
 
 @Override
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
index 1c16818..9aadeb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
@@ -90,18 +90,22 @@ public class TestGetContentSummaryWithSnapshot {
 final Path foo = new Path("/foo");
 final Path bar = new Path(foo, "bar");
 final Path baz = new Path(bar, "baz");
+final Path qux = new Path(bar, "qux");
+final Path temp = new Path("/temp");
 
 dfs.mkdirs(bar);
+dfs.mkdirs(temp);
 dfs.allowSnapshot(foo);
 dfs.createSnapshot(foo, "s1");
 
 DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+DFSTestUtil.createFile(dfs, qux, 10, REPLICATION, 0L);
 
 ContentSummary summary = cluster.getNameNodeRpc().getContentSummary(
 bar.toString());
 Assert.assertEquals(1, summary.getDirectoryCount());
-Assert.assertEquals(1, summary.getFileCount());
-Assert.assertEquals(10, summary.getLength());
+Assert.assertEquals(2, summary.getFileCount());
+Assert.assertEquals(20, summary.getLength());
 
 final Path barS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar");
 summary = cluster.getNameNodeRpc().getContentSummary(barS1.toString());
@@ -112,8 +116,8 @@ public class TestGetContentSummaryWithSnapshot {
 // also check /foo and /foo/.snapshot/s1
 summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
 Assert.assertEquals(2, summary.getDirectoryCount());
-Assert.assertEquals(1, summary.getFileCount());
-Assert.assertEquals(10, summary.getLength());
+Assert.assertEquals(2, summary.getFileCount());
+Assert.assertEquals(20, summary.getLength());
 
 final Path fooS1 = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
 summary = cluster.getNameNodeRpc().getContentSummary(fooS1.toString());
@@ -127,14 +131,14 @@ public class TestGetContentSummaryWithSnapshot {
 summary = cluster.getNameNodeRpc().ge
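
The behavioural contract the updated test pins down is that files created after a snapshot count toward the live directory's summary but not toward the snapshot path's. A compressed sketch of that check (a sketch only, assuming a running mini cluster whose DistributedFileSystem is passed in and the DFSTestUtil test helper on the classpath):

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class SnapshotSummarySketch {
  private SnapshotSummarySketch() { }

  static void checkSummaries(DistributedFileSystem dfs) throws Exception {
    Path foo = new Path("/foo");
    Path bar = new Path(foo, "bar");
    dfs.mkdirs(bar);
    dfs.allowSnapshot(foo);
    dfs.createSnapshot(foo, "s1");          // snapshot taken while bar is still empty
    DFSTestUtil.createFile(dfs, new Path(bar, "baz"), 10, (short) 3, 0L);

    // Live path: the file created after the snapshot is counted.
    ContentSummary live = dfs.getContentSummary(bar);
    assert live.getFileCount() == 1 && live.getLength() == 10;

    // Snapshot path: the summary reflects the directory as of snapshot time.
    ContentSummary snap = dfs.getContentSummary(new Path(foo, ".snapshot/s1/bar"));
    assert snap.getFileCount() == 0;
  }
}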

[hadoop] branch trunk updated: HDDS-1492. Generated chunk size name too long. Contributed by Shashikant Banerjee. (#1084)

2019-07-16 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d21eccf  HDDS-1492. Generated chunk size name too long. Contributed 
by Shashikant Banerjee. (#1084)
d21eccf is described below

commit d21eccf8bae611298de7dc3324eb4ac0f1473791
Author: Shashikant Banerjee 
AuthorDate: Tue Jul 16 18:01:13 2019 +0530

HDDS-1492. Generated chunk size name too long. Contributed by Shashikant 
Banerjee. (#1084)
---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 18 +-
 .../hadoop/ozone/client/io/BlockOutputStreamEntry.java |  2 +-
 2 files changed, 6 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 9e40f3e..27b6624 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
@@ -44,7 +43,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.util.Collections;
-import java.util.UUID;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Map;
@@ -81,14 +79,12 @@ public class BlockOutputStream extends OutputStream {
   LoggerFactory.getLogger(BlockOutputStream.class);
 
   private volatile BlockID blockID;
-  private final String key;
 
   private final BlockData.Builder containerBlockData;
   private XceiverClientManager xceiverClientManager;
   private XceiverClientSpi xceiverClient;
   private final ContainerProtos.ChecksumType checksumType;
   private final int bytesPerChecksum;
-  private final String streamId;
   private int chunkIndex;
   private int chunkSize;
   private final long streamBufferFlushSize;
@@ -125,7 +121,6 @@ public class BlockOutputStream extends OutputStream {
* Creates a new BlockOutputStream.
*
* @param blockID  block ID
-   * @param key  chunk key
* @param xceiverClientManager client manager that controls client
* @param pipeline pipeline where block will be written
* @param chunkSizechunk size
@@ -137,14 +132,13 @@ public class BlockOutputStream extends OutputStream {
* @param bytesPerChecksum  Bytes per checksum
*/
   @SuppressWarnings("parameternumber")
-  public BlockOutputStream(BlockID blockID, String key,
+  public BlockOutputStream(BlockID blockID,
   XceiverClientManager xceiverClientManager, Pipeline pipeline,
-  int chunkSize, long streamBufferFlushSize,
-  long streamBufferMaxSize, long watchTimeout, BufferPool bufferPool,
-  ChecksumType checksumType, int bytesPerChecksum)
+  int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize,
+  long watchTimeout, BufferPool bufferPool, ChecksumType checksumType,
+  int bytesPerChecksum)
   throws IOException {
 this.blockID = blockID;
-this.key = key;
 this.chunkSize = chunkSize;
 KeyValue keyValue =
 KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build();
@@ -153,7 +147,6 @@ public class BlockOutputStream extends OutputStream {
 .addMetadata(keyValue);
 this.xceiverClientManager = xceiverClientManager;
 this.xceiverClient = xceiverClientManager.acquireClient(pipeline);
-this.streamId = UUID.randomUUID().toString();
 this.chunkIndex = 0;
 this.streamBufferFlushSize = streamBufferFlushSize;
 this.streamBufferMaxSize = streamBufferMaxSize;
@@ -598,8 +591,7 @@ public class BlockOutputStream extends OutputStream {
 Checksum checksum = new Checksum(checksumType, bytesPerChecksum);
 ChecksumData checksumData = checksum.computeChecksum(chunk);
 ChunkInfo chunkInfo = ChunkInfo.newBuilder()
-.setChunkName(DigestUtils.md5Hex(key) + "_stream_" + streamId +
-"_chunk_" + ++chunkIndex)
+.setChunkName(blockID.getLocalID() + "_chunk_" + ++chunkIndex)
 .setOffset(0)
 .setLen(effectiveChunkSize)
 .setChecksumData(checksumData.getProtoBufMessage())
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/Block
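
The key change is the chunk name itself: instead of an MD5 of the key plus a per-stream UUID, the name is now derived from the block's local ID and a running chunk index, which keeps it short and deterministic. A minimal illustration of the new scheme (standalone sketch, names assumed):

/** Builds chunk names the way the patch above does: <localId>_chunk_<index>. */
final class ChunkNameSketch {
  private ChunkNameSketch() { }

  static String chunkName(long blockLocalId, int chunkIndex) {
    return blockLocalId + "_chunk_" + chunkIndex;
  }

  public static void main(String[] args) {
    System.out.println(chunkName(103822128129310721L, 1));  // 103822128129310721_chunk_1
  }
}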

[hadoop] branch trunk updated: HDDS-1780. TestFailureHandlingByClient tests are flaky. Contributed by Shashikant Banerjee. (#1073)

2019-07-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ccceedb  HDDS-1780. TestFailureHandlingByClient tests are flaky. 
Contributed by Shashikant Banerjee. (#1073)
ccceedb is described below

commit ccceedb432bc2379e4480f8a9c5ebb181531c04e
Author: Shashikant Banerjee 
AuthorDate: Thu Jul 18 16:01:58 2019 +0530

HDDS-1780. TestFailureHandlingByClient tests are flaky. Contributed by 
Shashikant Banerjee. (#1073)
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |   6 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |   3 +-
 .../client/rpc/TestFailureHandlingByClient.java|  52 +--
 .../rpc/TestMultiBlockWritesWithDnFailures.java| 168 +
 4 files changed, 180 insertions(+), 49 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 77acc42..eee813f 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -285,7 +285,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 }
 break;
   } catch (ExecutionException | InterruptedException | IOException e) {
-LOG.debug("Failed to execute command " + request + " on datanode " + dn
+LOG.error("Failed to execute command " + request + " on datanode " + dn
 .getUuidString(), e);
 if (!(e instanceof IOException)) {
   if (Status.fromThrowable(e.getCause()).getCode()
@@ -306,8 +306,8 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   return reply;
 } else {
   Preconditions.checkNotNull(ioException);
-  LOG.error("Failed to execute command " + request + " on the pipeline "
-  + pipeline.getId());
+  LOG.error("Failed to execute command {} on the pipeline {}.", request,
+  pipeline);
   throw ioException;
 }
   }
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 27b6624..88d178c 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -42,7 +42,6 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
-import java.util.Collections;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Map;
@@ -160,7 +159,7 @@ public class BlockOutputStream extends OutputStream {
 bufferList = null;
 totalDataFlushedLength = 0;
 writtenDataLength = 0;
-failedServers = Collections.emptyList();
+failedServers = new ArrayList<>(0);
 ioException = new AtomicReference<>(null);
   }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index d958076..7c014cc 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -81,11 +82,16 @@ public class TestFailureHandlingByClient {
 conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5,
 TimeUnit.SECONDS);
 conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, 
TimeUnit.MILLISECONDS);
-conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
-conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 5);
+conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
+conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10);
 conf.setTimeDuration(
 OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
 1, TimeUnit.SECONDS);
+conf.setTimeDuration(
+
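
Besides the relaxed timeouts and retry counts, note the switch from Collections.emptyList() to new ArrayList<>(0) for failedServers: Collections.emptyList() is immutable, so a later failedServers.add(...) would throw UnsupportedOperationException, which is presumably why the patch swaps it for a growable list. A tiny standalone demonstration of the difference:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class EmptyListDemo {
  public static void main(String[] args) {
    List<String> immutable = Collections.emptyList();
    try {
      immutable.add("dn-1");                   // throws UnsupportedOperationException
    } catch (UnsupportedOperationException e) {
      System.out.println("Collections.emptyList() cannot be modified");
    }

    List<String> mutable = new ArrayList<>(0); // empty but growable
    mutable.add("dn-1");
    System.out.println("failed servers: " + mutable);
  }
}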

[hadoop] branch trunk updated: HDDS-1654. Ensure container state on datanode gets synced to disk whenever state change happens. Contributed by Shashikant Banerjee. (#923)

2019-07-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 20cf50c  HDDS-1654. Ensure container state on datanode gets synced to 
disk whenever state change happens. Contributed by Shashikant Banerjee. (#923)
20cf50c is described below

commit 20cf50c6d054324503225c772256f0f91678599a
Author: Shashikant Banerjee 
AuthorDate: Thu Jul 18 17:09:05 2019 +0530

HDDS-1654. Ensure container state on datanode gets synced to disk whenever 
state change happens. Contributed by Shashikant Banerjee. (#923)
---
 .../hadoop/ozone/container/common/impl/ContainerDataYaml.java| 9 +++--
 .../common/transport/server/ratis/ContainerStateMachine.java | 3 +++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 1571944..1f9966c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -80,6 +80,7 @@ public final class ContainerDataYaml {
   public static void createContainerFile(ContainerType containerType,
   ContainerData containerData, File containerFile) throws IOException {
 Writer writer = null;
+FileOutputStream out = null;
 try {
   // Create Yaml for given container type
   Yaml yaml = getYamlForContainerType(containerType);
@@ -87,13 +88,17 @@ public final class ContainerDataYaml {
   containerData.computeAndSetChecksum(yaml);
 
   // Write the ContainerData with checksum to Yaml file.
-  writer = new OutputStreamWriter(new FileOutputStream(
-  containerFile), "UTF-8");
+  out = new FileOutputStream(
+  containerFile);
+  writer = new OutputStreamWriter(out, "UTF-8");
   yaml.dump(containerData, writer);
 
 } finally {
   try {
 if (writer != null) {
+  writer.flush();
+  // make sure the container metadata is synced to disk.
+  out.getFD().sync();
   writer.close();
 }
   } catch (IOException ex) {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 5c3fa85..d82d114 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -262,6 +262,9 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile);
   try (FileOutputStream fos = new FileOutputStream(snapshotFile)) {
 persistContainerSet(fos);
+fos.flush();
+// make sure the snapshot file is synced
+fos.getFD().sync();
   } catch (IOException ioe) {
 LOG.info("{}: Failed to write snapshot at:{} file {}", gid, ti,
 snapshotFile);
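
Both hunks follow the same durability pattern: flush the writer so buffered characters reach the FileOutputStream, then call getFD().sync() so the operating system forces the file data onto the storage device before the file is treated as written. A minimal, self-contained sketch of that pattern (not the actual ContainerDataYaml code):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

final class SyncedWrite {
  private SyncedWrite() { }

  static void writeDurably(File file, String content) throws IOException {
    try (FileOutputStream out = new FileOutputStream(file);
         Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
      writer.write(content);
      writer.flush();       // push buffered characters down to the FileOutputStream
      out.getFD().sync();   // force file data out of the OS cache onto disk
    }
  }
}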





[hadoop] branch trunk updated: HDDS-1779. TestWatchForCommit tests are flaky. Contributed by Shashikant Banerjee. (#1071)

2019-07-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8ef2365  HDDS-1779. TestWatchForCommit tests are flaky. Contributed by 
Shashikant Banerjee. (#1071)
8ef2365 is described below

commit 8ef2365ffd8fca888b23ad0e3afb7b0e09e3a5e0
Author: Shashikant Banerjee 
AuthorDate: Thu Jul 18 19:57:12 2019 +0530

HDDS-1779. TestWatchForCommit tests are flaky. Contributed by Shashikant 
Banerjee. (#1071)
---
 .../ozone/client/rpc/Test2WayCommitInRatis.java| 156 +
 .../ozone/client/rpc/TestWatchForCommit.java   | 100 -
 2 files changed, 187 insertions(+), 69 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
new file mode 100644
index 000..cf570d2
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientRatis;
+import org.apache.hadoop.hdds.scm.XceiverClientReply;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+
+/**
+ * This class tests the 2 way commit in Ratis.
+ */
+public class Test2WayCommitInRatis {
+
+  private MiniOzoneCluster cluster;
+  private OzoneClient client;
+  private ObjectStore objectStore;
+  private String volumeName;
+  private String bucketName;
+  private int chunkSize;
+  private int flushSize;
+  private int maxFlushSize;
+  private int blockSize;
+  private StorageContainerLocationProtocolClientSideTranslatorPB
+  storageContainerLocationClient;
+  private static String containerOwner = "OZONE";
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * 
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  private void startCluster(OzoneConfiguration conf) throws Exception {
+chunkSize = 100;
+flushSize = 2 * chunkSize;
+maxFlushSize = 2 * flushSize;
+blockSize = 2 * maxFlushSize;
+
+conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, 
TimeUnit.MILLISECONDS);
+conf.setTimeDuration(
+OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
+1, TimeUnit.SECONDS);
+
+conf.setQuietMode(false);
+cluster = MiniOzoneCluster.newBuilder(conf)
+.setNumDatanodes(7)
+.setBlockSize(blockSize)
+.setChunkSize(chunkSize)
+.setStreamBufferFlushSize(flushSize)
+.setStreamBufferMaxSize(maxFlushSize)
+.setStreamBufferSizeUnit(StorageUnit.BYTES)
+.build();
+cluster.waitForClusterToBeReady();
+//the easiest way to create an open container is creating a key
+client = OzoneClientFactory.getClient(conf);
+objectStore = client.getObjectStore();
+volumeName = "watchforc

[hadoop] branch trunk updated: HDDS-1749 : Ozone Client should randomize the list of nodes in pipeline for reads. (#1124)

2019-07-25 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ba43233  HDDS-1749 : Ozone Client should randomize the list of nodes 
in pipeline for reads. (#1124)
ba43233 is described below

commit ba43233451128118e999f51f7c52f2d50993b56e
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Thu Jul 25 07:54:46 2019 -0700

HDDS-1749 : Ozone Client should randomize the list of nodes in pipeline for 
reads. (#1124)
---
 .../src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java   | 4 
 1 file changed, 4 insertions(+)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index eee813f..9f99ab5 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -54,6 +54,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -269,6 +270,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   datanodeList = pipeline.getNodesInOrder();
 } else {
   datanodeList = pipeline.getNodes();
+  // Shuffle datanode list so that clients do not read in the same order
+  // every time.
+  Collections.shuffle(datanodeList);
 }
 for (DatanodeDetails dn : datanodeList) {
   try {
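
Shuffling the node list spreads reads across the replicas instead of always starting with the first datanode of the pipeline. The same idea in isolation, shuffling a defensive copy rather than the shared list (a slightly more conservative variant than the in-place shuffle above; sketch only):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class ReadOrderSketch {
  private ReadOrderSketch() { }

  /** Returns the replicas in a random order without mutating the input list. */
  static <T> List<T> randomized(List<T> replicas) {
    List<T> copy = new ArrayList<>(replicas);
    Collections.shuffle(copy);
    return copy;
  }
}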





[hadoop] branch trunk updated: HDDS-1864. Turn on topology aware read in TestFailureHandlingByClient. (#1168)

2019-07-26 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c01e137  HDDS-1864. Turn on topology aware read in 
TestFailureHandlingByClient. (#1168)
c01e137 is described below

commit c01e137273fe531b124c390fadb4c8b39b7fe65b
Author: Sammi Chen 
AuthorDate: Sat Jul 27 01:41:34 2019 +0800

HDDS-1864. Turn on topology aware read in TestFailureHandlingByClient. 
(#1168)
---
 .../org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 7c014cc..7ce41a9 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -91,7 +91,7 @@ public class TestFailureHandlingByClient {
 OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
 1, TimeUnit.SECONDS);
 conf.setBoolean(
-ScmConfigKeys.DFS_NETWORK_TOPOLOGY_AWARE_READ_ENABLED, false);
+ScmConfigKeys.DFS_NETWORK_TOPOLOGY_AWARE_READ_ENABLED, true);
 
 conf.setQuietMode(false);
 cluster = MiniOzoneCluster.newBuilder(conf)





[hadoop] branch trunk updated: HDDS-1816: ContainerStateMachine should limit number of pending apply transactions. Adds a config, uses snapshot threshold default value. (#1150)

2019-07-31 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d4ab9ae  HDDS-1816: ContainerStateMachine should limit number of 
pending apply transactions. Adds a config, uses snapshot threshold default 
value. (#1150)
d4ab9ae is described below

commit d4ab9aea6f9cbcdcaf48b821e5be04b4e952b133
Author: Lokesh Jain 
AuthorDate: Wed Jul 31 13:26:24 2019 +0530

HDDS-1816: ContainerStateMachine should limit number of pending apply 
transactions. Adds a config, uses snapshot threshold default value. (#1150)
---
 .../java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java|  8 
 hadoop-hdds/common/src/main/resources/ozone-default.xml   |  9 +
 .../transport/server/ratis/ContainerStateMachine.java | 15 +--
 3 files changed, 30 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 1f194d3..8fe609c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -90,6 +90,14 @@ public final class ScmConfigKeys {
   "dfs.container.ratis.statemachinedata.sync.retries";
   public static final int
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TRANSACTIONS =
+  "dfs.container.ratis.statemachine.max.pending.apply-transactions";
+  // The default value of maximum number of pending state machine apply
+  // transactions is kept same as default snapshot threshold.
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TRANSACTIONS_DEFAULT =
+  10;
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
   "dfs.container.ratis.log.queue.num-elements";
   public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index b2f820b..a88dd82 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -187,6 +187,15 @@
 
   
   
+
dfs.container.ratis.statemachine.max.pending.apply-transactions
+1
+OZONE, RATIS
+Maximum number of pending apply transactions in a data
+  pipeline. The default value is kept same as default snapshot threshold
+  dfs.ratis.snapshot.threshold.
+
+  
+  
 dfs.container.ratis.num.write.chunk.threads
 60
 OZONE, RATIS, PERFORMANCE
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index d82d114..872cc8a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -79,6 +80,7 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutionException;
 import java.util.stream.Collectors;
@@ -146,6 +148,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   private final Cache stateMachineDataCache;
   private final boolean isBlockTokenEnabled;
   private final TokenVerifier tokenVerifier;
+
+  private final Semaphore applyTransactionSemaphore;
   /**
* CSM metrics.
*/
@@ -175,6 +179,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 final int numContainerOpExecutors = conf.getInt(
 OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
 
OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT);
+i
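
The semaphore provides the back-pressure itself: take a permit before an applyTransaction is handed to the executor and release it when the transaction's future completes, so at most the configured number of transactions can be pending at once. A simplified standalone sketch of that general pattern (assumed names; not the ContainerStateMachine code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;

/** Bounds how many tasks may be pending on the executor at any time. */
class BoundedAsyncApplier {
  private final Semaphore permits;
  private final ExecutorService executor = Executors.newFixedThreadPool(4);

  BoundedAsyncApplier(int maxPendingTransactions) {
    this.permits = new Semaphore(maxPendingTransactions);
  }

  <T> CompletableFuture<T> apply(Supplier<T> transaction) throws InterruptedException {
    permits.acquire();      // blocks once maxPendingTransactions are already in flight
    return CompletableFuture
        .supplyAsync(transaction, executor)
        .whenComplete((result, error) -> permits.release());
  }

  void shutdown() {
    executor.shutdown();
  }
}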

[hadoop] branch trunk updated: HDDS-1782. Add an option to MiniOzoneChaosCluster to read files multiple times. Contributed by Mukul Kumar Singh. (#1076)

2019-08-02 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b5c74d4  HDDS-1782. Add an option to MiniOzoneChaosCluster to read 
files multiple times. Contributed by Mukul Kumar Singh. (#1076)
b5c74d4 is described below

commit b5c74d4ab88e2437c4a4f0464d0e2ea172c68367
Author: Mukul Kumar Singh 
AuthorDate: Fri Aug 2 21:50:39 2019 +0530

HDDS-1782. Add an option to MiniOzoneChaosCluster to read files multiple 
times. Contributed by Mukul Kumar Singh. (#1076)
---
 .../integration-test/src/test/bin/start-chaos.sh   |   2 +-
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |   6 +-
 .../hadoop/ozone/MiniOzoneLoadGenerator.java   | 153 -
 .../hadoop/ozone/TestMiniChaosOzoneCluster.java|   9 +-
 .../apache/hadoop/ozone/chaos/TestProbability.java |  43 ++
 5 files changed, 173 insertions(+), 40 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh 
b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
index 5de6013..002fe94 100755
--- a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
+++ b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
@@ -22,7 +22,7 @@ current="/tmp/"
 filename="${current}${date}${fileformat}"
 heapdumpfile="${current}${date}${heapformat}"
 
-export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError 
-XX:HeapDumpPath=${heapdumpfile}"
+export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError 
-XX:HeapDumpPath=${heapdumpfile} 
-Dorg.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false"
 
 echo "logging to ${filename}"
 echo "heapdump to ${heapdumpfile}"
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index ce29417..75911df 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -108,7 +108,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 LOG.info("{} Completed restarting Datanode: {}", failString,
 dn.getUuid());
   } catch (Exception e) {
-
+LOG.error("Failed to restartNodes Datanode", dn.getUuid());
   }
 }
   }
@@ -133,7 +133,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 LOG.info("Completed {} DataNode {}", stopString, dn.getUuid());
 
   } catch (Exception e) {
-
+LOG.error("Failed to shutdown Datanode", dn.getUuid());
   }
 }
   }
@@ -247,6 +247,8 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
   conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
   conf.setInt("hdds.scm.replication.thread.interval", 10 * 1000);
   conf.setInt("hdds.scm.replication.event.timeout", 20 * 1000);
+  conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100);
+  conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100);
 }
 
 @Override
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
index 67edb15..b942447 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
@@ -21,6 +21,7 @@ import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.chaos.TestProbability;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -28,6 +29,7 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -37,7 +39,10 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * A Simple Load generator for testing.
@@ -47,6 +52,8
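
The MiniOzoneLoadGenerator diff is cut off above. As a rough, JDK-only sketch of the pattern the commit message describes (re-reading the same key several times from a small reader pool), something like the following would do; the class, method and parameter names are illustrative and are not the ones used in MiniOzoneLoadGenerator:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Hypothetical, JDK-only sketch: submit the same read several times from a
 * small pool, the way a chaos-test load generator might.
 */
public final class RepeatedReadLoad {

  /** Callback standing in for the real bucket read call. */
  public interface ReadFn {
    void read(String keyName) throws Exception;
  }

  private final ExecutorService readers = Executors.newFixedThreadPool(4);
  private final AtomicBoolean running = new AtomicBoolean(true);

  /** Schedule {@code copies} tasks that each re-read the key {@code repeats} times. */
  public void submit(String keyName, int copies, int repeats, ReadFn readFn) {
    for (int i = 0; i < copies && running.get(); i++) {
      readers.submit(() -> {
        for (int r = 0; r < repeats && running.get(); r++) {
          try {
            readFn.read(keyName);   // each iteration is an independent read
          } catch (Exception e) {
            // a chaos test would record this as a read failure and keep going
          }
        }
      });
    }
  }

  public void shutdown() throws InterruptedException {
    running.set(false);
    readers.shutdown();
    readers.awaitTermination(1, TimeUnit.MINUTES);
  }
}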

[hadoop] branch trunk updated: HDDS-1878. checkstyle error in ContainerStateMachine (#1195)

2019-08-02 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 946e301  HDDS-1878. checkstyle error in ContainerStateMachine (#1195)
946e301 is described below

commit 946e30173142352f64040c876f4230bf36a10052
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Aug 2 18:57:54 2019 +0200

HDDS-1878. checkstyle error in ContainerStateMachine (#1195)
---
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java  | 4 ++--
 .../common/transport/server/ratis/ContainerStateMachine.java | 5 ++---
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 8fe609c..cfecd0f 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -91,12 +91,12 @@ public final class ScmConfigKeys {
   public static final int
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
   public static final String
-  DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TRANSACTIONS =
+  DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
   "dfs.container.ratis.statemachine.max.pending.apply-transactions";
   // The default value of maximum number of pending state machine apply
   // transactions is kept same as default snapshot threshold.
   public static final int
-  DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TRANSACTIONS_DEFAULT =
+  DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
   10;
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
   "dfs.container.ratis.log.queue.num-elements";
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 872cc8a..768b37b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -91,7 +91,6 @@ import java.io.FileOutputStream;
 import java.io.FileInputStream;
 import java.io.OutputStream;
 
-
 /** A {@link org.apache.ratis.statemachine.StateMachine} for containers.
  *
  * The stateMachine is responsible for handling different types of container
@@ -181,9 +180,9 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 
OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT);
 int maxPendingApplyTransactions = conf.getInt(
 ScmConfigKeys.
-DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TRANSACTIONS,
+DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS,
 ScmConfigKeys.
-
DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TRANSACTIONS_DEFAULT);
+DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
 applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
 this.executors = new ExecutorService[numContainerOpExecutors];
 for (int i = 0; i < numContainerOpExecutors; i++) {
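
The hunk above renames the max-pending-apply-transactions constant that feeds applyTransactionSemaphore. A minimal sketch of the underlying back-pressure pattern, assuming nothing beyond the JDK and with illustrative names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

/** Illustrative sketch of bounding in-flight async work with a Semaphore; not Ozone code. */
public final class BoundedAsyncApply {

  private final Semaphore pendingPermits;
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  public BoundedAsyncApply(int maxPending) {
    this.pendingPermits = new Semaphore(maxPending);
  }

  /** Blocks the caller once maxPending tasks are already queued or running. */
  public CompletableFuture<Void> apply(Runnable transaction) throws InterruptedException {
    pendingPermits.acquire();
    return CompletableFuture.runAsync(transaction, executor)
        .whenComplete((ok, err) -> pendingPermits.release());  // permit returned on success or failure
  }
}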





[hadoop] branch trunk updated: HDDS-1832 : Improve logging for PipelineActions handling in SCM and datanode. (#1200)

2019-08-02 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6e8c841  HDDS-1832 : Improve logging for PipelineActions handling in 
SCM and datanode. (#1200)
6e8c841 is described below

commit 6e8c8413ee8d7dc46a30035af0ea630d9755ac7f
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Fri Aug 2 10:01:00 2019 -0700

HDDS-1832 : Improve logging for PipelineActions handling in SCM and 
datanode. (#1200)
---
 .../common/transport/server/ratis/XceiverServerRatis.java  | 2 +-
 .../org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java | 7 ---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 1ae5456..3a8b79b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -558,7 +558,7 @@ public final class XceiverServerRatis extends XceiverServer 
{
 if (triggerHB) {
   context.getParent().triggerHeartbeat();
 }
-LOG.debug(
+LOG.info(
 "pipeline Action " + action.getAction() + "  on pipeline " + pipelineID
 + ".Reason : " + action.getClosePipeline().getDetailedReason());
   }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index 955bfc6..34e974a 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -57,9 +57,10 @@ public class PipelineActionHandler
   pipelineID = PipelineID.
   getFromProtobuf(action.getClosePipeline().getPipelineID());
   Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
-  LOG.info("Received pipeline action {} for {} from datanode {}",
-  action.getAction(), pipeline, report.getDatanodeDetails());
-  pipelineManager.finalizeAndDestroyPipeline(pipeline, true);
+  LOG.info("Received pipeline action {} for {} from datanode {}. " +
+  "Reason : {}", action.getAction(), pipeline,
+  report.getDatanodeDetails(),
+  action.getClosePipeline().getDetailedReason());
 } catch (IOException ioe) {
   LOG.error("Could not execute pipeline action={} pipeline={} {}",
   action, pipelineID, ioe);
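
For reference, the parameterized-logging style the patch moves toward (placeholders plus an explicit reason argument) can be sketched with plain SLF4J; the class and method names below are made up:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative only: parameterized SLF4J logging of an action plus its reason. */
final class PipelineActionLogging {

  private static final Logger LOG =
      LoggerFactory.getLogger(PipelineActionLogging.class);

  void onCloseAction(String action, String pipeline, String datanode, String reason) {
    // Placeholders defer string building until the message is actually emitted.
    LOG.info("Received pipeline action {} for {} from datanode {}. Reason : {}",
        action, pipeline, datanode, reason);
  }
}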





[hadoop] branch trunk updated: HDDS-1798. Propagate failure in writeStateMachineData to Ratis. Contributed by Supratim Deka (#1113)

2019-08-05 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f8ea6e1  HDDS-1798. Propagate failure in writeStateMachineData to 
Ratis. Contributed by Supratim Deka (#1113)
f8ea6e1 is described below

commit f8ea6e1ce132c65d5ce11597818bbf972717711a
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Mon Aug 5 13:43:41 2019 +0530

HDDS-1798. Propagate failure in writeStateMachineData to Ratis. Contributed 
by Supratim Deka (#1113)
---
 .../common/transport/server/ratis/CSMMetrics.java  | 10 
 .../server/ratis/ContainerStateMachine.java| 47 
 .../client/rpc/TestContainerStateMachine.java  |  8 +--
 .../rpc/TestContainerStateMachineFailures.java | 64 +++---
 4 files changed, 78 insertions(+), 51 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index ebbec4d..ccf57cb 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -52,6 +52,7 @@ public class CSMMetrics {
 
   // Failure Metrics
   private @Metric MutableCounterLong numWriteStateMachineFails;
+  private @Metric MutableCounterLong numWriteDataFails;
   private @Metric MutableCounterLong numQueryStateMachineFails;
   private @Metric MutableCounterLong numApplyTransactionFails;
   private @Metric MutableCounterLong numReadStateMachineFails;
@@ -97,6 +98,10 @@ public class CSMMetrics {
 numWriteStateMachineFails.incr();
   }
 
+  public void incNumWriteDataFails() {
+numWriteDataFails.incr();
+  }
+
   public void incNumQueryStateMachineFails() {
 numQueryStateMachineFails.incr();
   }
@@ -142,6 +147,11 @@ public class CSMMetrics {
   }
 
   @VisibleForTesting
+  public long getNumWriteDataFails() {
+return numWriteDataFails.value();
+  }
+
+  @VisibleForTesting
   public long getNumQueryStateMachineFails() {
 return numQueryStateMachineFails.value();
   }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 768b37b..f4d4744 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -137,8 +137,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   private final ContainerDispatcher dispatcher;
   private ThreadPoolExecutor chunkExecutor;
   private final XceiverServerRatis ratisServer;
-  private final ConcurrentHashMap<Long, CompletableFuture<Message>>
-  writeChunkFutureMap;
+  private final ConcurrentHashMap<Long,
+      CompletableFuture<ContainerCommandResponseProto>> writeChunkFutureMap;
 
   // keeps track of the containers created per pipeline
   private final Set createContainerSet;
@@ -385,9 +385,15 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 return response;
   }
 
+  private ContainerCommandResponseProto runCommandGetResponse(
+  ContainerCommandRequestProto requestProto,
+  DispatcherContext context) {
+return dispatchCommand(requestProto, context);
+  }
+
   private Message runCommand(ContainerCommandRequestProto requestProto,
   DispatcherContext context) {
-return dispatchCommand(requestProto, context)::toByteString;
+return runCommandGetResponse(requestProto, context)::toByteString;
   }
 
   private ExecutorService getCommandExecutor(
@@ -417,8 +423,11 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 .build();
 // ensure the write chunk happens asynchronously in writeChunkExecutor pool
 // thread.
-CompletableFuture<Message> writeChunkFuture = CompletableFuture
-.supplyAsync(() -> runCommand(requestProto, context), chunkExecutor);
+CompletableFuture<ContainerCommandResponseProto> writeChunkFuture =
+CompletableFuture.supplyAsync(() ->
+runCommandGetResponse(requestProto, context), chunkExecutor);
+
+CompletableFuture<Message> raftFuture = new CompletableFuture<>();
 
 writeChunkFutureMap.put(entryIndex, writeChunkFuture);
 LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " +
@@ -427,15 +436,29 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 // Remove the futu
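
The diff is truncated above, but the commit message states the intent: a failure in writeStateMachineData must reach Ratis rather than be dropped. A hedged, JDK-only sketch of that propagation pattern; names are illustrative, and the AtomicLong merely stands in for the numWriteDataFails metric:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

/** Hedged sketch of the idea only: surface async write failures instead of swallowing them. */
final class AsyncWritePropagation {

  private final ExecutorService chunkExecutor = Executors.newFixedThreadPool(2);
  private final AtomicLong writeDataFails = new AtomicLong();  // stand-in for a metrics counter

  <T> CompletableFuture<T> write(Supplier<T> writeOp) {
    CompletableFuture<T> callerFuture = new CompletableFuture<>();
    CompletableFuture.supplyAsync(writeOp, chunkExecutor)
        .whenComplete((response, error) -> {
          if (error != null) {
            writeDataFails.incrementAndGet();
            callerFuture.completeExceptionally(error);  // propagate instead of dropping
          } else {
            callerFuture.complete(response);
          }
        });
    return callerFuture;
  }
}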

[hadoop] branch trunk updated: HDDS-1908. TestMultiBlockWritesWithDnFailures is failing (#1282)

2019-08-13 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0b507d2  HDDS-1908. TestMultiBlockWritesWithDnFailures is failing 
(#1282)
0b507d2 is described below

commit 0b507d2ddf132985b43b4e2d3ad11d7fd2d90cd3
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Tue Aug 13 12:08:55 2019 +0200

HDDS-1908. TestMultiBlockWritesWithDnFailures is failing (#1282)
---
 .../client/rpc/TestFailureHandlingByClient.java| 65 +-
 .../rpc/TestMultiBlockWritesWithDnFailures.java| 76 ++
 2 files changed, 67 insertions(+), 74 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 3c7a25e..edb796b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -71,7 +72,6 @@ public class TestFailureHandlingByClient {
   private String volumeName;
   private String bucketName;
   private String keyString;
-  private int maxRetries;
 
   /**
* Create a MiniDFSCluster for testing.
@@ -82,7 +82,6 @@ public class TestFailureHandlingByClient {
*/
   private void init() throws Exception {
 conf = new OzoneConfiguration();
-maxRetries = 100;
 chunkSize = (int) OzoneConsts.MB;
 blockSize = 4 * chunkSize;
 conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5,
@@ -125,7 +124,8 @@ public class TestFailureHandlingByClient {
   /**
* Shutdown MiniDFSCluster.
*/
-  private void shutdown() {
+  @After
+  public void shutdown() {
 if (cluster != null) {
   cluster.shutdown();
 }
@@ -170,61 +170,6 @@ public class TestFailureHandlingByClient {
 OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
 Assert.assertEquals(data.length, keyInfo.getDataSize());
 validateData(keyName, data);
-shutdown();
-  }
-
-
-  @Test
-  public void testMultiBlockWritesWithIntermittentDnFailures()
-  throws Exception {
-startCluster();
-String keyName = UUID.randomUUID().toString();
-OzoneOutputStream key =
-createKey(keyName, ReplicationType.RATIS, 6 * blockSize);
-String data = ContainerTestHelper
-.getFixedLengthString(keyString, blockSize + chunkSize);
-key.write(data.getBytes());
-
-// get the name of a valid container
-Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
-KeyOutputStream keyOutputStream =
-(KeyOutputStream) key.getOutputStream();
-List streamEntryList =
-keyOutputStream.getStreamEntries();
-
-// Assert that 6 block will be preallocated
-Assert.assertEquals(6, streamEntryList.size());
-key.write(data.getBytes());
-key.flush();
-long containerId = streamEntryList.get(0).getBlockID().getContainerID();
-BlockID blockId = streamEntryList.get(0).getBlockID();
-ContainerInfo container =
-cluster.getStorageContainerManager().getContainerManager()
-.getContainer(ContainerID.valueof(containerId));
-Pipeline pipeline =
-cluster.getStorageContainerManager().getPipelineManager()
-.getPipeline(container.getPipelineID());
-List datanodes = pipeline.getNodes();
-cluster.shutdownHddsDatanode(datanodes.get(0));
-
-// The write will fail but exception will be handled and length will be
-// updated correctly in OzoneManager once the steam is closed
-key.write(data.getBytes());
-
-// shutdown the second datanode
-cluster.shutdownHddsDatanode(datanodes.get(1));
-key.write(data.getBytes());
-key.close();
-OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-.setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
-.setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
-.setRefreshPipeline(true)
-.build();
-OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-Assert.assertEquals(4 * data.getBytes().length, keyInfo.getDataSize());
-validateData(keyName,
-data.concat(data).concat(data).concat(data).getBytes());
-shutdown();
   }
 
   @Test
@@ -270,7 +215,6 @@ public
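
The change above converts a private shutdown() helper into a JUnit @After method so cleanup runs even when an assertion fails mid-test. A minimal illustration of the pattern, with a hypothetical AutoCloseable standing in for the cluster handle:

import org.junit.After;
import org.junit.Test;

/** Illustrative pattern only; the real test tears down a MiniOzoneCluster. */
public class TeardownPatternTest {

  private AutoCloseable cluster;   // a real test would create this in its init method

  @After
  public void shutdown() throws Exception {
    if (cluster != null) {
      cluster.close();             // runs after every @Test, whether it passed or failed
    }
  }

  @Test
  public void readsAndWrites() throws Exception {
    // test body; no trailing shutdown() call needed on the success path
  }
}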

[hadoop] branch trunk updated: HDFS-13101. Yet another fsimage corruption related to snapshot. Contributed by Shashikant Banerjee.

2019-08-14 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0a85af9  HDFS-13101. Yet another fsimage corruption related to 
snapshot. Contributed by Shashikant Banerjee.
0a85af9 is described below

commit 0a85af959ce505f0659e5c69d0ca83a5dce0a7c2
Author: Shashikant Banerjee 
AuthorDate: Thu Aug 15 10:16:25 2019 +0530

HDFS-13101. Yet another fsimage corruption related to snapshot. Contributed 
by Shashikant Banerjee.
---
 .../apache/hadoop/hdfs/server/namenode/INode.java  |  13 +++
 .../hdfs/server/namenode/INodeDirectory.java   |   8 ++
 .../hadoop/hdfs/server/namenode/INodeFile.java |  12 +++
 .../namenode/snapshot/AbstractINodeDiffList.java   |  15 +++-
 .../snapshot/DirectoryWithSnapshotFeature.java |  21 +++--
 .../namenode/snapshot/FileWithSnapshotFeature.java |   5 ++
 .../server/namenode/TestFSImageWithSnapshot.java   | 100 -
 .../namenode/snapshot/SnapshotTestHelper.java  |   3 +-
 8 files changed, 168 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 03b1ca3..c8d7ed6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -39,6 +39,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
@@ -646,6 +647,18 @@ public abstract class INode implements INodeAttributes, 
Diff.Element {
 return parent == null || !parent.isReference()? null: 
(INodeReference)parent;
   }
 
+  /**
+   * @return true if this is a reference and the reference count is 1;
+   * otherwise, return false.
+   */
+  public boolean isLastReference() {
+final INodeReference ref = getParentReference();
+if (!(ref instanceof WithCount)) {
+  return false;
+}
+return ((WithCount)ref).getReferenceCount() == 1;
+  }
+
   /** Set parent directory */
   public final void setParent(INodeDirectory parent) {
 this.parent = parent;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 28eb3d2..e71cb0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -903,6 +903,14 @@ public class INodeDirectory extends 
INodeWithAdditionalFields
   prefix.setLength(prefix.length() - 2);
   prefix.append("  ");
 }
+
+final DirectoryWithSnapshotFeature snapshotFeature =
+getDirectoryWithSnapshotFeature();
+if (snapshotFeature != null) {
+  out.print(prefix);
+  out.print(snapshotFeature);
+}
+out.println();
 dumpTreeRecursively(out, prefix, new Iterable() {
   final Iterator i = getChildrenList(snapshot).iterator();
   
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 6693297..7b6f1e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -1046,6 +1046,18 @@ public class INodeFile extends INodeWithAdditionalFields
 out.print(", blocks=");
 out.print(blocks.length == 0 ? null: blocks[0]);
 out.println();
+
+final FileWithSnapshotFeature snapshotFeature =
+getFileWithSnapshotFeature();
+if (snapshotFeature != null) {
+  if (prefix.length() >= 2) {
+prefix.setLength(prefix.length() - 2);
+prefix.append("  ");
+  }
+  out.print(prefix);
+  out.print(snapshotFeature);
+}
+out.println();
   }
 
   /**
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.ja
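
The new INode#isLastReference() shown above returns true only when the parent reference is a WithCount whose count is 1. A simplified, self-contained model of that check (not HDFS code; the classes below are stand-ins):

/** Simplified model of the last-reference check; the types here are made up. */
final class RefCountSketch {

  static class Reference {
    int referenceCount;
  }

  static class WithCount extends Reference {
  }

  /** True only when the parent is a counted reference and exactly one reference remains. */
  static boolean isLastReference(Reference parentReference) {
    if (!(parentReference instanceof WithCount)) {
      return false;
    }
    return parentReference.referenceCount == 1;
  }
}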

[hadoop] branch trunk updated: HDDS-1610. applyTransaction failure should not be lost on restart. Contributed by Shashikant Banerjee.

2019-08-20 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6244502  HDDS-1610. applyTransaction failure should not be lost on 
restart. Contributed by Shashikant Banerjee.
6244502 is described below

commit 62445021d5d57b0d49adcb1bd4365c13532328fc
Author: Shashikant Banerjee 
AuthorDate: Tue Aug 20 14:37:01 2019 +0530

HDDS-1610. applyTransaction failure should not be lost on restart. 
Contributed by Shashikant Banerjee.
---
 .../server/ratis/ContainerStateMachine.java|  84 +++
 .../transport/server/ratis/XceiverServerRatis.java |   9 ++
 .../proto/StorageContainerDatanodeProtocol.proto   |   1 +
 .../snapshot/DirectoryWithSnapshotFeature.java |   4 +-
 .../rpc/TestContainerStateMachineFailures.java | 156 +++--
 .../ozone/container/ContainerTestHelper.java   |  16 +++
 .../freon/TestFreonWithDatanodeFastRestart.java|  17 +--
 7 files changed, 207 insertions(+), 80 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index f4d4744..aadec8d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.StateMachineException;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.impl.RaftServerProxy;
 import org.apache.ratis.server.protocol.TermIndex;
@@ -83,6 +84,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.Set;
 import java.util.concurrent.ConcurrentSkipListSet;
@@ -147,6 +149,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   private final Cache stateMachineDataCache;
   private final boolean isBlockTokenEnabled;
   private final TokenVerifier tokenVerifier;
+  private final AtomicBoolean isStateMachineHealthy;
 
   private final Semaphore applyTransactionSemaphore;
   /**
@@ -184,6 +187,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 ScmConfigKeys.
 DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
 applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
+isStateMachineHealthy = new AtomicBoolean(true);
 this.executors = new ExecutorService[numContainerOpExecutors];
 for (int i = 0; i < numContainerOpExecutors; i++) {
   final int index = i;
@@ -265,6 +269,14 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   public long takeSnapshot() throws IOException {
 TermIndex ti = getLastAppliedTermIndex();
 long startTime = Time.monotonicNow();
+if (!isStateMachineHealthy.get()) {
+  String msg =
+  "Failed to take snapshot " + " for " + gid + " as the stateMachine"
+  + " is unhealthy. The last applied index is at " + ti;
+  StateMachineException sme = new StateMachineException(msg);
+  LOG.error(msg);
+  throw sme;
+}
 if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) {
   final File snapshotFile =
   storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
@@ -275,12 +287,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 // make sure the snapshot file is synced
 fos.getFD().sync();
   } catch (IOException ioe) {
-LOG.info("{}: Failed to write snapshot at:{} file {}", gid, ti,
+LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti,
 snapshotFile);
 throw ioe;
   }
-  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}",
-  gid, ti, snapshotFile, (Time.monotonicNow() - startTime));
+  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}", gid, ti,
+  snapshotFile, (Time.monotonicNow() - startTime));
   return ti.getIndex();
 }
 return -1;
@@ -385,17 +397,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 return 
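
The diff is truncated, but the core of the patch is the isStateMachineHealthy flag consulted in takeSnapshot(). A hedged sketch of that guard, using only the JDK and illustrative names:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/** Hedged sketch of the health-flag guard around snapshotting; names are illustrative. */
final class SnapshotHealthGuard {

  private final AtomicBoolean stateMachineHealthy = new AtomicBoolean(true);
  private volatile long lastAppliedIndex = -1;

  void onApplyFailure() {
    stateMachineHealthy.set(false);   // a failed apply poisons future snapshots
  }

  long takeSnapshot() throws IOException {
    if (!stateMachineHealthy.get()) {
      // Refusing to snapshot keeps the failed transaction from being silently
      // dropped across a restart: the log is replayed instead of a bad snapshot
      // being trusted.
      throw new IOException(
          "State machine unhealthy; last applied index " + lastAppliedIndex);
    }
    // ... persist the snapshot file here and return its index ...
    return lastAppliedIndex;
  }
}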

[hadoop] branch trunk updated: HDDS-1998. TestSecureContainerServer#testClientServerRatisGrpc is failing (#1328)

2019-08-26 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3329257  HDDS-1998. 
TestSecureContainerServer#testClientServerRatisGrpc is failing (#1328)
3329257 is described below

commit 3329257d99d2808e66ae6c2fe87a9c4f8877026f
Author: HUAN-PING SU 
AuthorDate: Tue Aug 27 14:28:23 2019 +0800

HDDS-1998. TestSecureContainerServer#testClientServerRatisGrpc is failing 
(#1328)
---
 .../ozone/container/server/TestSecureContainerServer.java | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 4de814c..431994e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container.server;
 
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -72,7 +73,6 @@ import java.util.List;
 import java.util.Set;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
-import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED;
 import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.SUCCESS;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
 import static 
org.apache.hadoop.ozone.container.ContainerTestHelper.getCreateContainerRequest;
@@ -80,6 +80,7 @@ import static 
org.apache.hadoop.ozone.container.ContainerTestHelper.getTestConta
 import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
 import static org.apache.ratis.rpc.SupportedRpcType.NETTY;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test Container servers when security is enabled.
@@ -202,9 +203,11 @@ public class TestSecureContainerServer {
 " authenticate with GRPC XceiverServer with Ozone block token",
 () -> finalClient.sendCommand(request));
   } else {
-ContainerCommandResponseProto response = finalClient.
-sendCommand(request);
-assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, response.getResult());
+IOException e = LambdaTestUtils.intercept(IOException.class,
+() -> finalClient.sendCommand(request));
+Throwable rootCause = ExceptionUtils.getRootCause(e);
+String msg = rootCause.getMessage();
+assertTrue(msg, msg.contains("Block token verification failed"));
   }
 
   // Test 2: Test success in request with valid block token.
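
The test change above switches from checking a response code to intercepting the IOException and asserting on its root-cause message. A self-contained illustration of the same assertion style, using the LambdaTestUtils and ExceptionUtils calls that appear in the diff; the helper method is hypothetical:

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Test;

/** Illustrative only: assert on the root-cause message of a wrapped failure. */
public class RootCauseAssertionTest {

  @Test
  public void rejectsInvalidBlockToken() throws Exception {
    IOException e = LambdaTestUtils.intercept(IOException.class,
        () -> sendWithInvalidToken());                    // hypothetical helper
    Throwable rootCause = ExceptionUtils.getRootCause(e);
    String msg = rootCause.getMessage();
    assertTrue(msg, msg.contains("Block token verification failed"));
  }

  private Object sendWithInvalidToken() throws IOException {
    // Stand-in for a client call that fails block token verification.
    throw new IOException(new SecurityException("Block token verification failed"));
  }
}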





[hadoop] branch trunk updated: Revert "HDDS-1610. applyTransaction failure should not be lost on restart. Contributed by Shashikant Banerjee."

2019-08-27 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ce8eb12  Revert "HDDS-1610. applyTransaction failure should not be 
lost on restart. Contributed by Shashikant Banerjee."
ce8eb12 is described below

commit ce8eb1283acbebb990a4f1e40848d78700309222
Author: Shashikant Banerjee 
AuthorDate: Tue Aug 27 23:23:44 2019 +0530

Revert "HDDS-1610. applyTransaction failure should not be lost on restart. 
Contributed by Shashikant Banerjee."

This reverts commit 62445021d5d57b0d49adcb1bd4365c13532328fc as it has 
unintended changes in DirectoryWithSnapshotFeature class.
---
 .../server/ratis/ContainerStateMachine.java|  84 ---
 .../transport/server/ratis/XceiverServerRatis.java |   9 --
 .../proto/StorageContainerDatanodeProtocol.proto   |   1 -
 .../snapshot/DirectoryWithSnapshotFeature.java |   4 +-
 .../rpc/TestContainerStateMachineFailures.java | 156 ++---
 .../ozone/container/ContainerTestHelper.java   |  16 ---
 .../freon/TestFreonWithDatanodeFastRestart.java|  17 ++-
 7 files changed, 80 insertions(+), 207 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index aadec8d..f4d4744 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -34,7 +34,6 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.StateMachineException;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.impl.RaftServerProxy;
 import org.apache.ratis.server.protocol.TermIndex;
@@ -84,7 +83,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.Set;
 import java.util.concurrent.ConcurrentSkipListSet;
@@ -149,7 +147,6 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   private final Cache stateMachineDataCache;
   private final boolean isBlockTokenEnabled;
   private final TokenVerifier tokenVerifier;
-  private final AtomicBoolean isStateMachineHealthy;
 
   private final Semaphore applyTransactionSemaphore;
   /**
@@ -187,7 +184,6 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 ScmConfigKeys.
 DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
 applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
-isStateMachineHealthy = new AtomicBoolean(true);
 this.executors = new ExecutorService[numContainerOpExecutors];
 for (int i = 0; i < numContainerOpExecutors; i++) {
   final int index = i;
@@ -269,14 +265,6 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   public long takeSnapshot() throws IOException {
 TermIndex ti = getLastAppliedTermIndex();
 long startTime = Time.monotonicNow();
-if (!isStateMachineHealthy.get()) {
-  String msg =
-  "Failed to take snapshot " + " for " + gid + " as the stateMachine"
-  + " is unhealthy. The last applied index is at " + ti;
-  StateMachineException sme = new StateMachineException(msg);
-  LOG.error(msg);
-  throw sme;
-}
 if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) {
   final File snapshotFile =
   storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
@@ -287,12 +275,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 // make sure the snapshot file is synced
 fos.getFD().sync();
   } catch (IOException ioe) {
-LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti,
+LOG.info("{}: Failed to write snapshot at:{} file {}", gid, ti,
 snapshotFile);
 throw ioe;
   }
-  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}", gid, ti,
-  snapshotFile, (Time.monotonicNow() - startTime));
+  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}",
+  gid, ti, snapshotFile, (Time.monot

[hadoop] branch trunk updated: HDDS-1610. applyTransaction failure should not be lost on restart. Contributed by Shashikant Banerjee(#1226).

2019-08-27 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 66cfa48  HDDS-1610. applyTransaction failure should not be lost on 
restart. Contributed by Shashikant Banerjee(#1226).
66cfa48 is described below

commit 66cfa482c450320f7326b2568703bae0d4b39e3c
Author: Shashikant Banerjee 
AuthorDate: Tue Aug 27 23:38:43 2019 +0530

HDDS-1610. applyTransaction failure should not be lost on restart. 
Contributed by Shashikant Banerjee(#1226).
---
 .../server/ratis/ContainerStateMachine.java|  84 +++
 .../transport/server/ratis/XceiverServerRatis.java |   9 ++
 .../proto/StorageContainerDatanodeProtocol.proto   |   1 +
 .../rpc/TestContainerStateMachineFailures.java | 156 +++--
 .../ozone/container/ContainerTestHelper.java   |  16 +++
 .../freon/TestFreonWithDatanodeFastRestart.java|  17 +--
 6 files changed, 205 insertions(+), 78 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index f4d4744..aadec8d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
 import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.StateMachineException;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.impl.RaftServerProxy;
 import org.apache.ratis.server.protocol.TermIndex;
@@ -83,6 +84,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.Set;
 import java.util.concurrent.ConcurrentSkipListSet;
@@ -147,6 +149,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   private final Cache stateMachineDataCache;
   private final boolean isBlockTokenEnabled;
   private final TokenVerifier tokenVerifier;
+  private final AtomicBoolean isStateMachineHealthy;
 
   private final Semaphore applyTransactionSemaphore;
   /**
@@ -184,6 +187,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 ScmConfigKeys.
 DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
 applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
+isStateMachineHealthy = new AtomicBoolean(true);
 this.executors = new ExecutorService[numContainerOpExecutors];
 for (int i = 0; i < numContainerOpExecutors; i++) {
   final int index = i;
@@ -265,6 +269,14 @@ public class ContainerStateMachine extends 
BaseStateMachine {
   public long takeSnapshot() throws IOException {
 TermIndex ti = getLastAppliedTermIndex();
 long startTime = Time.monotonicNow();
+if (!isStateMachineHealthy.get()) {
+  String msg =
+  "Failed to take snapshot " + " for " + gid + " as the stateMachine"
+  + " is unhealthy. The last applied index is at " + ti;
+  StateMachineException sme = new StateMachineException(msg);
+  LOG.error(msg);
+  throw sme;
+}
 if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) {
   final File snapshotFile =
   storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
@@ -275,12 +287,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 // make sure the snapshot file is synced
 fos.getFD().sync();
   } catch (IOException ioe) {
-LOG.info("{}: Failed to write snapshot at:{} file {}", gid, ti,
+LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti,
 snapshotFile);
 throw ioe;
   }
-  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}",
-  gid, ti, snapshotFile, (Time.monotonicNow() - startTime));
+  LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}", gid, ti,
+  snapshotFile, (Time.monotonicNow() - startTime));
   return ti.getIndex();
 }
 return -1;
@@ -385,17 +397,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 return response;
   }
 
-  private ContainerComm
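
The re-applied patch is truncated above; its other half is marking the state machine unhealthy when applyTransaction fails so the failure is not lost. A hedged, JDK-only sketch of that bookkeeping with made-up names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

/** Hedged sketch: remember an apply failure instead of losing it; names are illustrative. */
final class ApplyFailureTracking {

  private final AtomicBoolean stateMachineHealthy = new AtomicBoolean(true);
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  CompletableFuture<Long> applyTransaction(long index, Runnable op) {
    return CompletableFuture.supplyAsync(() -> {
      try {
        op.run();
        return index;                    // success: the applied index advances
      } catch (RuntimeException e) {
        stateMachineHealthy.set(false);  // the flag survives until restart
        throw e;                         // the returned future completes exceptionally
      }
    }, executor);
  }

  boolean isHealthy() {
    return stateMachineHealthy.get();
  }
}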

[hadoop] branch trunk updated: HDDS-1783 : Latency metric for applyTransaction in ContainerStateMachine (#1363).

2019-09-03 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b53d19a  HDDS-1783 : Latency metric for applyTransaction in 
ContainerStateMachine (#1363).
b53d19a is described below

commit b53d19a343e110dbcf0ec710e9d491ec6bd77a51
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Tue Sep 3 02:48:50 2019 -0700

HDDS-1783 : Latency metric for applyTransaction in ContainerStateMachine 
(#1363).
---
 .../common/transport/server/ratis/CSMMetrics.java| 14 ++
 .../transport/server/ratis/ContainerStateMachine.java| 15 ---
 .../dist/src/main/compose/ozonesecure-mr/docker-config   |  2 +-
 .../common/transport/server/ratis/TestCSMMetrics.java| 16 
 4 files changed, 43 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index 104a433..9893ae4 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -60,6 +60,9 @@ public class CSMMetrics {
   private @Metric MutableCounterLong numStartTransactionVerifyFailures;
   private @Metric MutableCounterLong numContainerNotOpenVerifyFailures;
 
+  private @Metric MutableRate applyTransaction;
+  private @Metric MutableRate writeStateMachineData;
+
   public CSMMetrics() {
 int numCmdTypes = ContainerProtos.Type.values().length;
 this.opsLatency = new MutableRate[numCmdTypes];
@@ -186,6 +189,10 @@ public class CSMMetrics {
 return numBytesCommittedCount.value();
   }
 
+  public MutableRate getApplyTransactionLatency() {
+return applyTransaction;
+  }
+
   public void incPipelineLatency(ContainerProtos.Type type, long latencyNanos) 
{
 opsLatency[type.ordinal()].add(latencyNanos);
 transactionLatency.add(latencyNanos);
@@ -199,6 +206,13 @@ public class CSMMetrics {
 numContainerNotOpenVerifyFailures.incr();
   }
 
+  public void recordApplyTransactionCompletion(long latencyNanos) {
+applyTransaction.add(latencyNanos);
+  }
+
+  public void recordWriteStateMachineCompletion(long latencyNanos) {
+writeStateMachineData.add(latencyNanos);
+  }
 
   public void unRegister() {
 MetricsSystem ms = DefaultMetricsSystem.instance();
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 4f876bc..0780f84 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -411,7 +411,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   }
 
   private CompletableFuture handleWriteChunk(
-  ContainerCommandRequestProto requestProto, long entryIndex, long term) {
+  ContainerCommandRequestProto requestProto, long entryIndex, long term,
+  long startTime) {
 final WriteChunkRequestProto write = requestProto.getWriteChunk();
 RaftServer server = ratisServer.getServer();
 Preconditions.checkState(server instanceof RaftServerProxy);
@@ -461,6 +462,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 write.getBlockID() + " logIndex " + entryIndex + " chunkName " +
 write.getChunkData().getChunkName());
 raftFuture.complete(r::toByteString);
+metrics.recordWriteStateMachineCompletion(
+Time.monotonicNowNanos() - startTime);
   }
 
   writeChunkFutureMap.remove(entryIndex);
@@ -477,6 +480,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   public CompletableFuture writeStateMachineData(LogEntryProto entry) 
{
 try {
   metrics.incNumWriteStateMachineOps();
+  long writeStateMachineStartTime = Time.monotonicNowNanos();
   ContainerCommandRequestProto requestProto =
   getContainerCommandRequestProto(
   entry.getStateMachineLogEntry().getLogData());
@@ -493,7 +497,7 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   switch (cmdType) {
   case WriteChunk:
 return handleWriteChunk(requestProto, entry.getIndex(),
-entry.getTerm());
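
The diff stops before the applyTransaction timing is shown, but the pattern is visible in the writeStateMachineData path: capture Time.monotonicNowNanos() at the start and feed the delta into a MutableRate. A small sketch of that pattern using a MetricsRegistry directly; the names are illustrative and metrics-system registration is omitted:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.util.Time;

/** Illustrative latency-rate pattern modeled on the CSMMetrics change; names are made up. */
final class ApplyLatencySketch {

  private final MetricsRegistry registry = new MetricsRegistry("ApplyLatencySketch");
  private final MutableRate applyTransaction = registry.newRate("applyTransaction");

  /** Time an operation and feed the elapsed nanoseconds into the rate metric. */
  long timeApply(Runnable op) {
    long start = Time.monotonicNowNanos();
    op.run();
    long elapsedNanos = Time.monotonicNowNanos() - start;
    applyTransaction.add(elapsedNanos);   // MutableRate tracks sample count and average
    return elapsedNanos;
  }
}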

[hadoop] branch trunk updated: HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.

2019-12-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fdd96e4  HDFS-15012. NN fails to parse Edit logs after applying 
HDFS-13101. Contributed by Shashikant Banerjee.
fdd96e4 is described below

commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd
Author: Shashikant Banerjee 
AuthorDate: Wed Dec 18 22:50:46 2019 +0530

HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. 
Contributed by Shashikant Banerjee.
---
 .../snapshot/DirectoryWithSnapshotFeature.java |  8 ++-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++
 2 files changed, 70 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7fb639c..4e756c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -739,8 +739,12 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
   // were created before "prior" will be covered by the later 
   // cleanSubtreeRecursively call.
   if (priorCreated != null) {
-if (currentINode.isLastReference()) {
-  // if this is the last reference, the created list can be
+if (currentINode.isLastReference() &&
+currentINode.getDiffs().getLastSnapshotId() == prior) {
+  // If this is the last reference of the directory inode and it
+  // can not be accessed in any of the subsequent snapshots i.e,
+  // this is the latest snapshot diff and if this is the last
+  // reference, the created list can be
   // destroyed.
   priorDiff.getChildrenDiff().destroyCreatedList(
   reclaimContext, currentINode);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index f5b5345..128e3ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -49,6 +49,7 @@ import org.mockito.Mockito;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -2412,4 +2413,67 @@ public class TestRenameWithSnapshots {
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", 
"newDir/file2"));
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", 
"newDir/file1"));
   }
+
+  @Test (timeout=6)
+  public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+hdfs.mkdirs(sub1);
+hdfs.allowSnapshot(sub1);
+final Path dir1 = new Path(sub1, "dir1");
+final Path dir2 = new Path(sub1, "dir2");
+final Path dir3 = new Path(sub1, "dir3");
+final String snap3 = "snap3";
+final String snap4 = "snap4";
+final String snap5 = "snap5";
+final String snap6 = "snap6";
+final Path foo = new Path(dir2, "foo");
+final Path bar = new Path(dir2, "bar");
+hdfs.createSnapshot(sub1, snap1);
+hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+rename(dir1, dir2);
+hdfs.createSnapshot(sub1, snap2);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap3);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap4);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap5);
+rename(dir2, dir3);
+hdfs.createSnapshot(sub1, snap6);
+hdfs.delete(dir3, true);
+deleteSnapshot(sub1, snap6);
+deleteSnapshot(sub1, snap3);
+// save namespace and restart Namenode
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+cluster.restartNameNode(true);
+  }
+
+
+  void rename(Pa
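
The fix narrows the condition under which a prior snapshot diff's created list may be destroyed: the inode must be the last reference and the prior snapshot must also be the latest one in its diff list. A simplified model of just that predicate (illustrative names, not HDFS code):

/** Simplified model of the tightened cleanup condition from the patch above. */
final class CreatedListCleanupRule {

  /**
   * The created list of a prior snapshot diff may only be destroyed when this is
   * the last reference to the directory AND no later snapshot can still reach it,
   * i.e. the prior snapshot is also the latest one recorded in the diff list.
   */
  static boolean canDestroyCreatedList(boolean isLastReference,
      int lastSnapshotIdInDiffs, int priorSnapshotId) {
    return isLastReference && lastSnapshotIdInDiffs == priorSnapshotId;
  }
}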

[hadoop] branch branch-3.2 updated: HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.

2019-12-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new afe00a1  HDFS-15012. NN fails to parse Edit logs after applying 
HDFS-13101. Contributed by Shashikant Banerjee.
afe00a1 is described below

commit afe00a1ca58b83bd5d91cba65559cb6bdf7c6220
Author: Shashikant Banerjee 
AuthorDate: Wed Dec 18 22:50:46 2019 +0530

HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. 
Contributed by Shashikant Banerjee.

(cherry picked from commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd)
---
 .../snapshot/DirectoryWithSnapshotFeature.java |  8 ++-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++
 2 files changed, 70 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7fb639c..4e756c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -739,8 +739,12 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
   // were created before "prior" will be covered by the later 
   // cleanSubtreeRecursively call.
   if (priorCreated != null) {
-if (currentINode.isLastReference()) {
-  // if this is the last reference, the created list can be
+if (currentINode.isLastReference() &&
+currentINode.getDiffs().getLastSnapshotId() == prior) {
+  // If this is the last reference of the directory inode and it
+  // can not be accessed in any of the subsequent snapshots i.e,
+  // this is the latest snapshot diff and if this is the last
+  // reference, the created list can be
   // destroyed.
   priorDiff.getChildrenDiff().destroyCreatedList(
   reclaimContext, currentINode);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index e8819ee..31b62d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -49,6 +49,7 @@ import org.mockito.Mockito;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -2410,4 +2411,67 @@ public class TestRenameWithSnapshots {
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", 
"newDir/file2"));
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", 
"newDir/file1"));
   }
+
+  @Test (timeout=6)
+  public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+hdfs.mkdirs(sub1);
+hdfs.allowSnapshot(sub1);
+final Path dir1 = new Path(sub1, "dir1");
+final Path dir2 = new Path(sub1, "dir2");
+final Path dir3 = new Path(sub1, "dir3");
+final String snap3 = "snap3";
+final String snap4 = "snap4";
+final String snap5 = "snap5";
+final String snap6 = "snap6";
+final Path foo = new Path(dir2, "foo");
+final Path bar = new Path(dir2, "bar");
+hdfs.createSnapshot(sub1, snap1);
+hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+rename(dir1, dir2);
+hdfs.createSnapshot(sub1, snap2);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap3);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap4);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap5);
+rename(dir2, dir3);
+hdfs.createSnapshot(sub1, snap6);
+hdfs.delete(dir3, true);
+deleteSnapshot(sub1, snap6);
+deleteSnapshot(sub1, snap3);
+// save namespace and restart Namenode
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMode(SafeM

[hadoop] branch branch-3.1 updated: HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.

2019-12-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 280cd6d  HDFS-15012. NN fails to parse Edit logs after applying 
HDFS-13101. Contributed by Shashikant Banerjee.
280cd6d is described below

commit 280cd6dd1ee945342d6269842280f7ea59ed
Author: Shashikant Banerjee 
AuthorDate: Wed Dec 18 22:50:46 2019 +0530

HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. 
Contributed by Shashikant Banerjee.

(cherry picked from commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd)
---
 .../snapshot/DirectoryWithSnapshotFeature.java |  8 ++-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++
 2 files changed, 70 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7fb639c..4e756c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -739,8 +739,12 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
   // were created before "prior" will be covered by the later 
   // cleanSubtreeRecursively call.
   if (priorCreated != null) {
-if (currentINode.isLastReference()) {
-  // if this is the last reference, the created list can be
+if (currentINode.isLastReference() &&
+currentINode.getDiffs().getLastSnapshotId() == prior) {
+  // If this is the last reference of the directory inode and it
+  // can not be accessed in any of the subsequent snapshots i.e,
+  // this is the latest snapshot diff and if this is the last
+  // reference, the created list can be
   // destroyed.
   priorDiff.getChildrenDiff().destroyCreatedList(
   reclaimContext, currentINode);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 8d54e15..cb4b3c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -49,6 +49,7 @@ import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -2409,4 +2410,67 @@ public class TestRenameWithSnapshots {
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", 
"newDir/file2"));
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", 
"newDir/file1"));
   }
+
+  @Test (timeout=6)
+  public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+hdfs.mkdirs(sub1);
+hdfs.allowSnapshot(sub1);
+final Path dir1 = new Path(sub1, "dir1");
+final Path dir2 = new Path(sub1, "dir2");
+final Path dir3 = new Path(sub1, "dir3");
+final String snap3 = "snap3";
+final String snap4 = "snap4";
+final String snap5 = "snap5";
+final String snap6 = "snap6";
+final Path foo = new Path(dir2, "foo");
+final Path bar = new Path(dir2, "bar");
+hdfs.createSnapshot(sub1, snap1);
+hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+rename(dir1, dir2);
+hdfs.createSnapshot(sub1, snap2);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap3);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap4);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap5);
+rename(dir2, dir3);
+hdfs.createSnapshot(sub1, snap6);
+hdfs.delete(dir3, true);
+deleteSnapshot(sub1, snap6);
+deleteSnapshot(sub1, snap3);
+// save namespace and restart Namenode
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+ 
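To make the semantics of the HDFS-15012 fix above explicit: the created list hanging off a prior snapshot diff may only be destroyed when the directory inode is the last remaining reference AND the prior diff is also its latest snapshot diff. A minimal, hedged sketch of that guard follows; the wrapper method and parameter scaffolding are simplified for illustration and are not the actual DirectoryWithSnapshotFeature code.

    // Sketch only: restates the guarded cleanup introduced by HDFS-15012.
    // isLastReference(), getDiffs(), getLastSnapshotId() and
    // destroyCreatedList() appear in the diff above; the rest is scaffolding.
    void cleanupPriorCreatedList(INodeDirectory currentINode,
        DirectoryWithSnapshotFeature.DirectoryDiff priorDiff, int prior,
        INode.ReclaimContext reclaimContext) {
      final boolean lastReference = currentINode.isLastReference();
      final boolean priorIsLatestDiff =
          currentINode.getDiffs().getLastSnapshotId() == prior;
      if (lastReference && priorIsLatestDiff) {
        // No later snapshot (and no other reference) can still reach the
        // inodes in the created list, so it is safe to destroy it.
        priorDiff.getChildrenDiff().destroyCreatedList(reclaimContext,
            currentINode);
      }
      // Otherwise the created list must be kept; destroying it too early is
      // what produced edit logs the NameNode could no longer replay.
    }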

[hadoop] branch branch-2.10 updated: HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.

2019-12-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new abbf2a4  HDFS-15012. NN fails to parse Edit logs after applying 
HDFS-13101. Contributed by Shashikant Banerjee.
abbf2a4 is described below

commit abbf2a4446ed9345e5c87a5636b04c300cb72cb3
Author: Shashikant Banerjee 
AuthorDate: Wed Dec 18 22:50:46 2019 +0530

HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. 
Contributed by Shashikant Banerjee.

(cherry picked from commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd)
---
 .../snapshot/DirectoryWithSnapshotFeature.java |  8 ++-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++
 2 files changed, 70 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0b6078a..d2dd3ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -740,8 +740,12 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
   // were created before "prior" will be covered by the later 
   // cleanSubtreeRecursively call.
   if (priorCreated != null) {
-if (currentINode.isLastReference()) {
-  // if this is the last reference, the created list can be
+if (currentINode.isLastReference() &&
+currentINode.getDiffs().getLastSnapshotId() == prior) {
+  // If this is the last reference of the directory inode and it
+  // can not be accessed in any of the subsequent snapshots i.e,
+  // this is the latest snapshot diff and if this is the last
+  // reference, the created list can be
   // destroyed.
   priorDiff.getChildrenDiff().destroyCreatedList(
   reclaimContext, currentINode);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index ad3a5a1..65b129a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -29,6 +29,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -2409,4 +2410,67 @@ public class TestRenameWithSnapshots {
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", 
"newDir/file2"));
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", 
"newDir/file1"));
   }
+
+  @Test (timeout=6)
+  public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+hdfs.mkdirs(sub1);
+hdfs.allowSnapshot(sub1);
+final Path dir1 = new Path(sub1, "dir1");
+final Path dir2 = new Path(sub1, "dir2");
+final Path dir3 = new Path(sub1, "dir3");
+final String snap3 = "snap3";
+final String snap4 = "snap4";
+final String snap5 = "snap5";
+final String snap6 = "snap6";
+final Path foo = new Path(dir2, "foo");
+final Path bar = new Path(dir2, "bar");
+hdfs.createSnapshot(sub1, snap1);
+hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+rename(dir1, dir2);
+hdfs.createSnapshot(sub1, snap2);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap3);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap4);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap5);
+rename(dir2, dir3);
+hdfs.createSnapshot(sub1, snap6);
+hdfs.delete(dir3, true);
+deleteSnapshot(sub1, snap6);
+deleteSnapshot(sub1, snap3);
+// save namespace and restart Namenode
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMo

[hadoop] branch branch-2.9 updated: HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.

2019-12-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new 49d7152  HDFS-15012. NN fails to parse Edit logs after applying 
HDFS-13101. Contributed by Shashikant Banerjee.
49d7152 is described below

commit 49d7152c356cd062084489bd175cdf90c0ae9298
Author: Shashikant Banerjee 
AuthorDate: Wed Dec 18 22:50:46 2019 +0530

HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. 
Contributed by Shashikant Banerjee.

(cherry picked from commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd)
---
 .../snapshot/DirectoryWithSnapshotFeature.java |  8 ++-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++
 2 files changed, 70 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0b6078a..d2dd3ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -740,8 +740,12 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
   // were created before "prior" will be covered by the later 
   // cleanSubtreeRecursively call.
   if (priorCreated != null) {
-if (currentINode.isLastReference()) {
-  // if this is the last reference, the created list can be
+if (currentINode.isLastReference() &&
+currentINode.getDiffs().getLastSnapshotId() == prior) {
+  // If this is the last reference of the directory inode and it
+  // can not be accessed in any of the subsequent snapshots i.e,
+  // this is the latest snapshot diff and if this is the last
+  // reference, the created list can be
   // destroyed.
   priorDiff.getChildrenDiff().destroyCreatedList(
   reclaimContext, currentINode);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index ad3a5a1..65b129a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -29,6 +29,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -2409,4 +2410,67 @@ public class TestRenameWithSnapshots {
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", 
"newDir/file2"));
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", 
"newDir/file1"));
   }
+
+  @Test (timeout=6)
+  public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+hdfs.mkdirs(sub1);
+hdfs.allowSnapshot(sub1);
+final Path dir1 = new Path(sub1, "dir1");
+final Path dir2 = new Path(sub1, "dir2");
+final Path dir3 = new Path(sub1, "dir3");
+final String snap3 = "snap3";
+final String snap4 = "snap4";
+final String snap5 = "snap5";
+final String snap6 = "snap6";
+final Path foo = new Path(dir2, "foo");
+final Path bar = new Path(dir2, "bar");
+hdfs.createSnapshot(sub1, snap1);
+hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+rename(dir1, dir2);
+hdfs.createSnapshot(sub1, snap2);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap3);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap4);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap5);
+rename(dir2, dir3);
+hdfs.createSnapshot(sub1, snap6);
+hdfs.delete(dir3, true);
+deleteSnapshot(sub1, snap6);
+deleteSnapshot(sub1, snap3);
+// save namespace and restart Namenode
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMo

[hadoop] branch branch-2.8 updated: HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. Contributed by Shashikant Banerjee.

2019-12-18 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new 78b71f6  HDFS-15012. NN fails to parse Edit logs after applying 
HDFS-13101. Contributed by Shashikant Banerjee.
78b71f6 is described below

commit 78b71f6d7466249cea31ac799a242cceaf874fa1
Author: Shashikant Banerjee 
AuthorDate: Wed Dec 18 22:50:46 2019 +0530

HDFS-15012. NN fails to parse Edit logs after applying HDFS-13101. 
Contributed by Shashikant Banerjee.

(cherry picked from commit fdd96e46d1f89f0ecdb9b1836dc7fca9fbb954fd)
---
 .../snapshot/DirectoryWithSnapshotFeature.java |  8 ++-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 64 ++
 2 files changed, 70 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 94b8106..6b4f0dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -739,8 +739,12 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
   // were created before "prior" will be covered by the later 
   // cleanSubtreeRecursively call.
   if (priorCreated != null) {
-if (currentINode.isLastReference()) {
-  // if this is the last reference, the created list can be
+if (currentINode.isLastReference() &&
+currentINode.getDiffs().getLastSnapshotId() == prior) {
+  // If this is the last reference of the directory inode and it
+  // can not be accessed in any of the subsequent snapshots i.e,
+  // this is the latest snapshot diff and if this is the last
+  // reference, the created list can be
   // destroyed.
   priorDiff.getChildrenDiff().destroyCreatedList(
   reclaimContext, currentINode);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 827feb6..d84b3e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -29,6 +29,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -2409,4 +2410,67 @@ public class TestRenameWithSnapshots {
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", 
"newDir/file2"));
 assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", 
"newDir/file1"));
   }
+
+  @Test (timeout=6)
+  public void testDoubleRenamesWithSnapshotDelete() throws Exception {
+hdfs.mkdirs(sub1);
+hdfs.allowSnapshot(sub1);
+final Path dir1 = new Path(sub1, "dir1");
+final Path dir2 = new Path(sub1, "dir2");
+final Path dir3 = new Path(sub1, "dir3");
+final String snap3 = "snap3";
+final String snap4 = "snap4";
+final String snap5 = "snap5";
+final String snap6 = "snap6";
+final Path foo = new Path(dir2, "foo");
+final Path bar = new Path(dir2, "bar");
+hdfs.createSnapshot(sub1, snap1);
+hdfs.mkdirs(dir1, new FsPermission((short) 0777));
+rename(dir1, dir2);
+hdfs.createSnapshot(sub1, snap2);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap3);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap4);
+hdfs.delete(foo, false);
+DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
+hdfs.createSnapshot(sub1, snap5);
+rename(dir2, dir3);
+hdfs.createSnapshot(sub1, snap6);
+hdfs.delete(dir3, true);
+deleteSnapshot(sub1, snap6);
+deleteSnapshot(sub1, snap3);
+// save namespace and restart Namenode
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMo

[hadoop] branch trunk updated: HDFS-15313. Ensure inodes in active filesystem are not deleted during snapshot delete. Contributed by Shashikant Banerjee.

2020-04-30 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8234379  HDFS-15313. Ensure inodes in active filesystem are not deleted 
during snapshot delete. Contributed by Shashikant Banerjee.
8234379 is described below

commit 82343790eebc3ebe7ef81f6b89260e5bbf121d83
Author: Shashikant Banerjee 
AuthorDate: Fri May 1 12:15:23 2020 +0530

HDFS-15313. Ensure inodes in active filesystem are not deleted during 
snapshot delete. Contributed by Shashikant Banerjee.
---
 .../apache/hadoop/hdfs/server/namenode/INode.java  | 44 +++
 .../snapshot/DirectoryWithSnapshotFeature.java | 29 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java | 15 -
 .../server/namenode/TestFSImageWithSnapshot.java   | 65 +-
 .../namenode/snapshot/TestRenameWithSnapshots.java | 41 +-
 5 files changed, 154 insertions(+), 40 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 6b29b33..a9f2035 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -17,27 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.PrintStream;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
@@ -46,9 +40,14 @@ import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.List;
+import java.util.Map;
 
 /**
  * We keep an in-memory representation of the file/block hierarchy.
@@ -225,6 +224,27 @@ public abstract class INode implements INodeAttributes, 
Diff.Element {
 return this;
   }
 
+  /** Is this inode in the current state? */
+  public boolean isInCurrentState() {
+if (isRoot()) {
+  return true;
+}
+final INodeDirectory parentDir = getParent();
+if (parentDir == null) {
+  return false; // this inode is only referenced in snapshots
+}
+if (!parentDir.isInCurrentState()) {
+  return false;
+}
+final INode child = parentDir.getChild(getLocalNameBytes(),
+Snapshot.CURRENT_STATE_ID);
+if (this == child) {
+  return true;
+}
+return child != null && child.isReference() &&
+this.equals(child.asReference().getReferredINode());
+  }
+
   /** Is this inode in the latest snapshot? */
   public final boolean isInLatestSnapshot(final int latestSnapshotId) {
 if (latestSnapshotId == Snapshot.CURRENT_STATE_ID ||
@@ -234,6 +254,8 @@ public abstract class INode implements INodeAttributes, 
Diff.Element {
 // if parent is a reference node, parent must be a renamed node. We can 
 // stop the check at the reference node.
 if (parent != null && parent.isReference()) {
+  // TODO: Is it a bug to return true?
+  //   Some ancestor nodes may not be in the latest snapshot.
   return true;
 }
 final INodeDirectory parentDir = getParent();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWith
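The new INode#isInCurrentState() shown above answers whether an inode is still reachable from the root in the current (non-snapshot) view: it walks the parent chain and re-resolves the child at Snapshot.CURRENT_STATE_ID, also accepting the case where the resolved child is a reference back to this inode. A hedged sketch of how a snapshot-deletion path can consult it; the helper below is hypothetical and not part of the patch.

    // Illustrative helper only (not in the patch): skip reclaiming inodes
    // that the active filesystem can still reach.
    static boolean safeToReclaimOnSnapshotDelete(INode inode) {
      if (inode.isInCurrentState()) {
        // Still visible outside of snapshots; deleting a snapshot must not
        // remove it from the active namespace.
        return false;
      }
      // Only referenced from snapshots, so snapshot deletion may reclaim it.
      return true;
    }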

[hadoop] branch trunk updated: HDFS-15319. Fix INode#isInLatestSnapshot() API. Contributed by Shashikant Banerjee.

2020-07-14 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 85d4718  HDFS-15319. Fix INode#isInLatestSnapshot() API. Contributed 
by Shashikant Banerjee.
85d4718 is described below

commit 85d4718ed737d3bfadf815765336465a7a98bc47
Author: Shashikant Banerjee 
AuthorDate: Wed Jul 15 10:32:25 2020 +0530

HDFS-15319. Fix INode#isInLatestSnapshot() API. Contributed by Shashikant 
Banerjee.
---
 .../src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java | 2 --
 1 file changed, 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 6545777..6334ed2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -254,8 +254,6 @@ public abstract class INode implements INodeAttributes, 
Diff.Element {
 // if parent is a reference node, parent must be a renamed node. We can 
 // stop the check at the reference node.
 if (parent != null && parent.isReference()) {
-  // TODO: Is it a bug to return true?
-  //   Some ancestor nodes may not be in the latest snapshot.
   return true;
 }
 final INodeDirectory parentDir = getParent();





[hadoop] branch trunk updated: HDFS-15463. Add a tool to validate FsImage. (#2140)

2020-07-19 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2cec50c  HDFS-15463. Add a tool to validate FsImage. (#2140)
2cec50c is described below

commit 2cec50cf1657672e14541717b8222cecc3ad5dd0
Author: Tsz-Wo Nicholas Sze 
AuthorDate: Sun Jul 19 23:14:30 2020 -0700

HDFS-15463. Add a tool to validate FsImage. (#2140)
---
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs  |   4 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd  |   8 +-
 .../hdfs/server/namenode/EditLogInputStream.java   |   5 +
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hdfs/server/namenode/FsImageValidation.java| 275 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  |   8 +-
 .../hdfs/server/namenode/INodeReference.java   | 127 +-
 .../server/namenode/INodeReferenceValidation.java  | 224 +
 .../server/namenode/TestFsImageValidation.java |  93 +++
 9 files changed, 740 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 94426a5..7a8bf8d 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -47,6 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
   hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the 
NameNode"
   hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
+  hadoop_add_subcommand "fsImageValidation" admin "run FsImageValidation to 
check an fsimage"
   hadoop_add_subcommand "getconf" client "get config values from configuration"
   hadoop_add_subcommand "groups" client "get the groups which users belong to"
   hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
@@ -143,6 +144,9 @@ function hdfscmd_case
 fsck)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
 ;;
+fsImageValidation)
+  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.namenode.FsImageValidation
+;;
 getconf)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetConf
 ;;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index a9a7852..23d6a5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto dfsrouter dfsrouteradmin debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck fsImageValidation balancer jmxget oiv oev fetchdt getconf 
groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies 
classpath crypto dfsrouter dfsrouteradmin debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -121,6 +121,11 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
   goto :eof
 
+:fsImageValidation
+  set CLASS=org.apache.hadoop.hdfs.server.namenode.FsImageValidation
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  goto :eof
+
 :balancer
   set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS%
@@ -236,6 +241,7 @@ goto :eof
   @echo   dfsadmin run a DFS admin client
   @echo   haadmin  run a DFS HA admin client
   @echo   fsck run a DFS filesystem checking utility
+  @echo   fsImageValidationrun FsImageValidation to check an fsimage
   @echo   balancer run a cluster balancing utility
   @echo   jmxget   get JMX exported values from NameNode or 
DataNode.
   @echo   oiv  apply the offline fsimage viewer to an fsimage
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
index a4377cd..8f324fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
@@ -209,4 +209,9 @@ public abstract class EditLogInputStream implements 
Closeable {
* even faster data source (e.g. a byt

[hadoop] branch trunk updated: HDFS-15470. Added more unit tests to validate rename behaviour across snapshots. Contributed by Shashikant Banerjee.

2020-07-20 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d9441f9  HDFS-15470. Added more unit tests to validate rename 
behaviour across snapshots. Contributed by Shashikant Banerjee.
d9441f9 is described below

commit d9441f95c362214e249b969c9ccc3fb4e8c1709a
Author: Shashikant Banerjee 
AuthorDate: Tue Jul 21 11:13:05 2020 +0530

HDFS-15470. Added more unit tests to validate rename behaviour across 
snapshots. Contributed by Shashikant Banerjee.
---
 .../server/namenode/TestFSImageWithSnapshot.java   | 178 +
 1 file changed, 178 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index f27c8f2..3fd725b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -649,4 +649,182 @@ public class TestFSImageWithSnapshot {
 fsn = cluster.getNamesystem();
 hdfs = cluster.getFileSystem();
   }
+
+
+  @Test (timeout=6)
+  public void testFSImageWithRename1() throws Exception {
+final Path dir1 = new Path("/dir1");
+final Path dir2 = new Path("/dir2");
+hdfs.mkdirs(dir1);
+hdfs.mkdirs(dir2);
+Path dira = new Path(dir1, "dira");
+Path dirx = new Path(dir1, "dirx");
+Path dirb = new Path(dirx, "dirb");
+hdfs.mkdirs(dira);
+hdfs.mkdirs(dirx);
+hdfs.allowSnapshot(dir1);
+hdfs.createSnapshot(dir1, "s0");
+hdfs.mkdirs(dirb);
+hdfs.createSnapshot(dir1, "s1");
+Path rennamePath = new Path(dira, "dirb");
+// mv /dir1/dirx/dirb to /dir1/dira/dirb
+hdfs.rename(dirb, rennamePath);
+hdfs.createSnapshot(dir1, "s2");
+Path diry = new Path("/dir1/dira/dirb/diry");
+hdfs.mkdirs(diry);
+hdfs.createSnapshot(dir1, "s3");
+Path file1 = new Path("/dir1/dira/dirb/diry/file1");
+DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed);
+hdfs.createSnapshot(dir1, "s4");
+hdfs.delete(new Path("/dir1/dira/dirb"), true);
+hdfs.deleteSnapshot(dir1, "s1");
+hdfs.deleteSnapshot(dir1, "s3");
+// file1 should exist in the last snapshot
+assertTrue(hdfs.exists(
+new Path("/dir1/.snapshot/s4/dira/dirb/diry/file1")));
+
+// save namespace and restart cluster
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+cluster.shutdown();
+cluster = new MiniDFSCluster.Builder(conf).format(false)
+.numDataNodes(NUM_DATANODES).build();
+cluster.waitActive();
+fsn = cluster.getNamesystem();
+hdfs = cluster.getFileSystem();
+  }
+
+  @Test (timeout=6)
+  public void testFSImageWithRename2() throws Exception {
+final Path dir1 = new Path("/dir1");
+final Path dir2 = new Path("/dir2");
+hdfs.mkdirs(dir1);
+hdfs.mkdirs(dir2);
+Path dira = new Path(dir1, "dira");
+Path dirx = new Path(dir1, "dirx");
+Path dirb = new Path(dirx, "dirb");
+hdfs.mkdirs(dira);
+hdfs.mkdirs(dirx);
+hdfs.allowSnapshot(dir1);
+hdfs.createSnapshot(dir1, "s0");
+hdfs.mkdirs(dirb);
+hdfs.createSnapshot(dir1, "s1");
+Path rennamePath = new Path(dira, "dirb");
+// mv /dir1/dirx/dirb to /dir1/dira/dirb
+hdfs.rename(dirb, rennamePath);
+hdfs.createSnapshot(dir1, "s2");
+Path file1 = new Path("/dir1/dira/dirb/file1");
+DFSTestUtil.createFile(hdfs,
+new Path(
+"/dir1/dira/dirb/file1"), BLOCKSIZE, (short) 1, seed);
+hdfs.createSnapshot(dir1, "s3");
+hdfs.deleteSnapshot(dir1, "s1");
+hdfs.deleteSnapshot(dir1, "s3");
+assertTrue(hdfs.exists(file1));
+
+// save namespace and restart cluster
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+hdfs.saveNamespace();
+hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+cluster.shutdown();
+cluster = new MiniDFSCluster.Builder(conf).format(false)
+.numDataNodes(NUM_DATANODES).build();
+cluster.waitActive();
+fsn = cluster.getNamesystem();
+hdfs = cluster.getFileSystem();
+  }
+
+  @Test(timeout = 6)
+  public void testFSImageWithRename3() throws Exception {
+final Path dir1 = new Path("/dir1");
+

[hadoop] branch trunk updated: HDFS-15479. Ordered snapshot deletion: make it a configurable feature (#2156)

2020-07-20 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d57462f  HDFS-15479. Ordered snapshot deletion: make it a configurable 
feature (#2156)
d57462f is described below

commit d57462f2daee5f057e32219d4123a3f75506d6d4
Author: Tsz-Wo Nicholas Sze 
AuthorDate: Mon Jul 20 23:06:24 2020 -0700

HDFS-15479. Ordered snapshot deletion: make it a configurable feature 
(#2156)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   7 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java  |  37 +++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  20 
 .../snapshot/DirectorySnapshottableFeature.java|   2 +-
 .../namenode/TestOrderedSnapshotDeletion.java  | 105 +
 5 files changed, 164 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9de33ff..dd24785 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -500,8 +500,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
 
   public static final String DFS_NAMENODE_SNAPSHOT_MAX_LIMIT =
   "dfs.namenode.snapshot.max.limit";
-
   public static final int DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT = 65536;
+
+  public static final String DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED =
+  "dfs.namenode.snapshot.deletion.ordered";
+  public static final boolean DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_DEFAULT
+  = false;
+
   public static final String DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL =
   "dfs.namenode.snapshot.skiplist.interval";
   public static final int DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL_DEFAULT 
=
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index c854f83..c2eb401 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -249,12 +250,41 @@ class FSDirSnapshotOp {
   fsd.checkOwner(pc, iip);
 }
 
+// time of snapshot deletion
+final long now = Time.now();
+if (fsd.isSnapshotDeletionOrdered()) {
+  final INodeDirectory srcRoot = snapshotManager.getSnapshottableRoot(iip);
+  final DirectorySnapshottableFeature snapshottable
+  = srcRoot.getDirectorySnapshottableFeature();
+  final Snapshot snapshot = snapshottable.getSnapshotByName(
+  srcRoot, snapshotName);
+
+  // Diffs must be not empty since a snapshot exists in the list
+  final int earliest = snapshottable.getDiffs().iterator().next()
+  .getSnapshotId();
+  if (snapshot.getId() != earliest) {
+throw new SnapshotException("Failed to delete snapshot " + snapshotName
++ " from directory " + srcRoot.getFullPathName()
++ ": " + snapshot + " is not the earliest snapshot id=" + earliest
++ " (" + DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED
++ " is " + fsd.isSnapshotDeletionOrdered() + ")");
+  }
+}
+
+final INode.BlocksMapUpdateInfo collectedBlocks = deleteSnapshot(
+fsd, snapshotManager, iip, snapshotName, now);
+fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
+logRetryCache, now);
+return collectedBlocks;
+  }
+
+  static INode.BlocksMapUpdateInfo deleteSnapshot(
+  FSDirectory fsd, SnapshotManager snapshotManager, INodesInPath iip,
+  String snapshotName, long now) throws IOException {
 INode.BlocksMapUpdateInfo collectedBlocks = new 
INode.BlocksMapUpdateInfo();
 ChunkedArrayList removedINodes = new ChunkedArrayList<>();
 INode.ReclaimContext context = new INode.ReclaimContext(
 fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, 
null);
-// time of snapshot deletion
-final long now = Time.now();
 fsd.writeLock();
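With dfs.namenode.snapshot.deletion.ordered enabled, the block above rejects any delete that does not target the earliest snapshot of the snapshottable root. A minimal configuration sketch using only the key added in this patch; cluster startup is elided.

    // Sketch: enable ordered snapshot deletion before starting the NameNode.
    // Uses DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED from the diff.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
    // Start the NameNode / MiniDFSCluster with this conf. Deleting any
    // snapshot other than the earliest one then fails with the
    // SnapshotException constructed in FSDirSnapshotOp above.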
  

[hadoop] branch trunk updated (3eaf627 -> 6828737)

2020-07-29 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 3eaf627  YARN-10343. Legacy RM UI should include labeled metrics for 
allocated, total, and reserved resources. Contributed by Eric Payne
 add 6828737  HDFS-15488. Add a command to list all snapshots for a 
snaphottable root with snapshot Ids. (#2166)

No new revisions were added by this update.

Summary of changes:
 .../dev-support/findbugsExcludeFile.xml|   1 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java |  19 ++
 .../apache/hadoop/hdfs/DFSOpsCountStatistics.java  |   1 +
 .../apache/hadoop/hdfs/DistributedFileSystem.java  |  14 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java   |  12 ++
 .../hadoop/hdfs/protocol/SnapshotStatus.java   | 226 +
 .../ClientNamenodeProtocolTranslatorPB.java|  22 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java |  77 +++
 .../src/main/proto/ClientNamenodeProtocol.proto|  10 +
 .../hadoop-hdfs-client/src/main/proto/hdfs.proto   |  21 ++
 .../apache/hadoop/hdfs/protocol/TestReadOnly.java  |   1 +
 .../federation/router/RouterClientProtocol.java|   7 +
 .../server/federation/router/RouterRpcServer.java  |   7 +
 .../server/federation/router/RouterSnapshot.java   |  35 
 .../server/federation/router/TestRouterRpc.java|  16 +-
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs  |   4 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd  |   8 +-
 ...ientNamenodeProtocolServerSideTranslatorPB.java |  24 +++
 .../hdfs/server/namenode/FSDirSnapshotOp.java  |  17 ++
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  34 +++-
 .../hdfs/server/namenode/NameNodeRpcServer.java|  11 +
 .../server/namenode/metrics/NameNodeMetrics.java   |   7 +
 .../server/namenode/snapshot/SnapshotManager.java  |  39 +++-
 .../org/apache/hadoop/hdfs/tools/AdminHelper.java  |   2 +-
 .../{LsSnapshottableDir.java => LsSnapshot.java}   |  42 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md  |  10 +
 .../hadoop-hdfs/src/site/markdown/HdfsSnapshots.md |  17 ++
 .../server/namenode/snapshot/TestListSnapshot.java | 134 
 28 files changed, 787 insertions(+), 31 deletions(-)
 create mode 100644 
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
 copy 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/{LsSnapshottableDir.java
 => LsSnapshot.java} (59%)
 create mode 100644 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
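Based on the summary above, the change adds a SnapshotStatus type, a ClientProtocol/DistributedFileSystem call, and an LsSnapshot CLI tool for listing all snapshots under a snapshottable root. A hedged client-side sketch follows; the method name getSnapshotListing is an assumption inferred from the file summary, since the actual signature is not shown in this excerpt.

    // Sketch only; getSnapshotListing(...) is assumed, not quoted from the patch.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path snapshotRoot = new Path("/data");   // an existing snapshottable directory
    SnapshotStatus[] snapshots = dfs.getSnapshotListing(snapshotRoot);
    for (SnapshotStatus s : snapshots) {
      System.out.println(s);                 // SnapshotStatus added by this change
    }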





[hadoop] branch trunk updated: HDFS-15481. Ordered snapshot deletion: garbage collect deleted snapshots (#2165)

2020-07-30 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 05b3337  HDFS-15481. Ordered snapshot deletion: garbage collect 
deleted snapshots (#2165)
05b3337 is described below

commit 05b3337a4605dcb6904cb3fe2a58e4dc424ef015
Author: Tsz-Wo Nicholas Sze 
AuthorDate: Thu Jul 30 10:36:51 2020 -0700

HDFS-15481. Ordered snapshot deletion: garbage collect deleted snapshots 
(#2165)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   5 -
 .../hdfs/server/common/HdfsServerConstants.java|   2 +-
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |   6 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  11 --
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  36 
 .../hdfs/server/namenode/INodeDirectory.java   |   7 +-
 .../namenode/snapshot/AbstractINodeDiffList.java   |  11 ++
 .../snapshot/DirectorySnapshottableFeature.java|   4 +-
 .../namenode/snapshot/FSImageFormatPBSnapshot.java |   2 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java|   7 +
 .../namenode/snapshot/SnapshotDeletionGc.java  | 110 
 .../server/namenode/snapshot/SnapshotManager.java  | 117 +++--
 .../TestOrderedSnapshotDeletion.java   |  64 +--
 .../snapshot/TestOrderedSnapshotDeletionGc.java| 184 +
 .../namenode/snapshot/TestSnapshotDeletion.java|   4 +-
 15 files changed, 517 insertions(+), 53 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dd24785..3865f9b 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -502,11 +502,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.namenode.snapshot.max.limit";
   public static final int DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT = 65536;
 
-  public static final String DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED =
-  "dfs.namenode.snapshot.deletion.ordered";
-  public static final boolean DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_DEFAULT
-  = false;
-
   public static final String DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL =
   "dfs.namenode.snapshot.skiplist.interval";
   public static final int DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL_DEFAULT 
=
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index a55985e..3cd6f28 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -366,7 +366,7 @@ public interface HdfsServerConstants {
   "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
   "system.hdfs.erasurecoding.policy";
-  String SNAPSHOT_XATTR_NAME = "system.hdfs.snapshot.deleted";
+  String XATTR_SNAPSHOT_DELETED = "system.hdfs.snapshot.deleted";
 
   String XATTR_SATISFY_STORAGE_POLICY = "user.hdfs.sps";
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 4f215ac..7e1657f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -44,7 +44,7 @@ import java.util.ListIterator;
 import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SNAPSHOT_XATTR_NAME;
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SNAPSHOT_DELETED;
 import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 
 public class FSDirXAttrOp {
@@ -328,10 +328,10 @@ public class FSDirXAttrOp {
 SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file.");
   }
 
-  if (xaName.equals(SNAPSHOT_XATTR_NAME) && !(inode.isDirectory() &am

[hadoop] branch trunk updated: HDFS-15497. Make snapshot limit on global as well per snapshot root directory configurable (#2175)

2020-08-04 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e072d33  HDFS-15497. Make snapshot limit on global as well per 
snapshot root directory configurable (#2175)
e072d33 is described below

commit e072d33327b8f5d38b74a15e279d492ad379a47c
Author: bshashikant 
AuthorDate: Tue Aug 4 14:10:29 2020 +0530

HDFS-15497. Make snapshot limit on global as well per snapshot root 
directory configurable (#2175)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  6 +++
 .../server/namenode/snapshot/SnapshotManager.java  | 30 ++--
 .../src/main/resources/hdfs-default.xml|  9 
 .../namenode/snapshot/TestSnapshotManager.java | 53 --
 4 files changed, 82 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3865f9b..d1c32f1 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -501,6 +501,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String DFS_NAMENODE_SNAPSHOT_MAX_LIMIT =
   "dfs.namenode.snapshot.max.limit";
   public static final int DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT = 65536;
+  public static final String
+  DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT =
+  "dfs.namenode.snapshot.filesystem.limit";
+  // default value is same as snapshot quota set for a snapshottable directory
+  public static final int
+  DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT_DEFAULT = 65536;
 
   public static final String DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL =
   "dfs.namenode.snapshot.skiplist.interval";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index d566112..7569fc6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -120,12 +120,14 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
   private final boolean snapshotDeletionOrdered;
   private int snapshotCounter = 0;
   private final int maxSnapshotLimit;
+  private final int maxSnapshotFSLimit;
   
   /** All snapshottable directories in the namesystem. */
   private final Map snapshottables =
   new ConcurrentHashMap<>();
 
-  public SnapshotManager(final Configuration conf, final FSDirectory fsdir) {
+  public SnapshotManager(final Configuration conf, final FSDirectory fsdir)
+  throws SnapshotException {
 this.fsdir = fsdir;
 this.captureOpenFiles = conf.getBoolean(
 DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
@@ -138,13 +140,20 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
 DFSConfigKeys.
 DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT_DEFAULT);
 this.maxSnapshotLimit = conf.getInt(
-DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT,
-DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT);
+DFSConfigKeys.
+DFS_NAMENODE_SNAPSHOT_MAX_LIMIT,
+DFSConfigKeys.
+DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT);
+this.maxSnapshotFSLimit = conf.getInt(
+DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT,
+DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT_DEFAULT);
 LOG.info("Loaded config captureOpenFiles: " + captureOpenFiles
 + ", skipCaptureAccessTimeOnlyChange: "
 + skipCaptureAccessTimeOnlyChange
 + ", snapshotDiffAllowSnapRootDescendant: "
 + snapshotDiffAllowSnapRootDescendant
++ ", maxSnapshotFSLimit: "
++ maxSnapshotFSLimit
 + ", maxSnapshotLimit: "
 + maxSnapshotLimit);
 
@@ -160,6 +169,13 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
 final int skipInterval = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL,
 DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL_DEFAULT);
+if (maxSnapshotLimit > maxSnapshotFSLimit) {
+  final String errMsg = DFSConfigKeys.
+  DFS_NAMENODE_SNAPSHOT_MAX_LIMIT
+  + " cannot be greater than " +
+  DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT;
+  throw new SnapshotException(errMsg)
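The keys shown above make the snapshot limit tunable both per snapshot root (dfs.namenode.snapshot.max.limit) and for the whole filesystem (dfs.namenode.snapshot.filesystem.limit), and SnapshotManager now refuses to start when the per-root limit exceeds the filesystem-wide one. A small configuration sketch using only the keys from the diff:

    // Sketch: the per-root limit must not exceed the filesystem-wide limit,
    // otherwise SnapshotManager throws the SnapshotException built above.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 1024);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_FILESYSTEM_LIMIT, 65536);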

[hadoop] branch trunk updated (32895f4 -> 592127b)

2020-08-11 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 32895f4  HDFS-15507. [JDK 11] Fix javadoc errors in hadoop-hdfs-client 
module. Contributed by Xieming Li.
 add 592127b  HDFS-15520 Use visitor pattern to visit namespace tree (#2203)

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/hdfs/server/namenode/INode.java  |  11 +-
 .../hdfs/server/namenode/INodeDirectory.java   |   6 +
 .../hadoop/hdfs/server/namenode/INodeFile.java |   6 +
 .../hdfs/server/namenode/INodeReference.java   |  10 +-
 .../hadoop/hdfs/server/namenode/INodeSymlink.java  |   9 +-
 .../server/namenode/INodeWithAdditionalFields.java |   2 +-
 .../snapshot/DirectoryWithSnapshotFeature.java |   2 +-
 .../namenode/visitor/NamespacePrintVisitor.java| 227 +++
 .../server/namenode/visitor/NamespaceVisitor.java  | 243 +
 .../hdfs/server/namenode/visitor}/package.html |   4 +-
 .../server/namenode/TestFSImageWithSnapshot.java   |   4 +
 11 files changed, 515 insertions(+), 9 deletions(-)
 create mode 100644 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespacePrintVisitor.java
 create mode 100644 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor/NamespaceVisitor.java
 copy {hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util 
=> 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/visitor}/package.html
 (93%)
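HDFS-15520 adds a NamespaceVisitor interface, a NamespacePrintVisitor implementation, and accept-style hooks on the INode classes (see the per-file line counts above). The interface itself is not quoted here, so the sketch below only illustrates the general visitor shape such a change implies; every name in it is made up for illustration and is not the HDFS API.

    // Generic visitor-pattern sketch; illustrative names only.
    interface NamespaceNodeVisitor {
      void visitDirectory(String name, int depth);
      void visitFile(String name, int depth);
    }

    final class PrintingVisitor implements NamespaceNodeVisitor {
      @Override
      public void visitDirectory(String name, int depth) {
        System.out.println(indent(depth) + name + "/");
      }
      @Override
      public void visitFile(String name, int depth) {
        System.out.println(indent(depth) + name);
      }
      private static String indent(int depth) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < depth; i++) {
          sb.append("  ");
        }
        return sb.toString();
      }
    }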





[hadoop] branch trunk updated: HDFS-15496. Add UI for deleted snapshots (#2212)

2020-08-13 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cb50e3f  HDFS-15496. Add UI for deleted snapshots (#2212)
cb50e3f is described below

commit cb50e3fcf70f8d5cb30238e96f87f9d6e2f2260a
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Aug 13 10:06:15 2020 -0700

HDFS-15496. Add UI for deleted snapshots (#2212)
---
 .../hadoop/hdfs/protocol/SnapshotStatus.java   | 54 --
 .../apache/hadoop/hdfs/protocol/SnapshotInfo.java  | 14 --
 .../server/namenode/snapshot/SnapshotManager.java  | 11 +++--
 .../src/main/webapps/hdfs/dfshealth.html   |  2 +
 4 files changed, 19 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
index 8c0dabd..3e2a7ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
@@ -174,60 +174,6 @@ public class SnapshotStatus {
 return Math.max(n, String.valueOf(value).length());
   }
 
-  /**
-   * To be used to for collection of snapshot jmx.
-   */
-  public static class Bean {
-private final String path;
-private final int snapshotID;
-private final long modificationTime;
-private final short permission;
-private final String owner;
-private final String group;
-private final boolean isDeleted;
-
-
-public Bean(String path, int snapshotID, long
-modificationTime, short permission, String owner, String group,
-boolean isDeleted) {
-  this.path = path;
-  this.snapshotID = snapshotID;
-  this.modificationTime = modificationTime;
-  this.permission = permission;
-  this.owner = owner;
-  this.group = group;
-  this.isDeleted = isDeleted;
-}
-
-public String getPath() {
-  return path;
-}
-
-public int getSnapshotID() {
-  return snapshotID;
-}
-
-public long getModificationTime() {
-  return modificationTime;
-}
-
-public short getPermission() {
-  return permission;
-}
-
-public String getOwner() {
-  return owner;
-}
-
-public String getGroup() {
-  return group;
-}
-
-public boolean isDeleted() {
-  return isDeleted;
-}
-  }
-
   static String getSnapshotPath(String snapshottableDir,
 String snapshotRelativePath) {
 String parentFullPathStr =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
index 676e827..ef54778 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
@@ -82,18 +82,20 @@ public class SnapshotInfo {
   }
 
   public static class Bean {
-private final String snapshotID;
+private final int snapshotID;
 private final String snapshotDirectory;
 private final long modificationTime;
+private final String status;
 
-public Bean(String snapshotID, String snapshotDirectory,
-long modificationTime) {
+public Bean(int snapshotID, String snapshotDirectory,
+long modificationTime, boolean isMarkedAsDeleted) {
   this.snapshotID = snapshotID;
   this.snapshotDirectory = snapshotDirectory;
   this.modificationTime = modificationTime;
+  this.status = isMarkedAsDeleted ? "DELETED" : "ACTIVE";
 }
 
-public String getSnapshotID() {
+public int getSnapshotID() {
   return snapshotID;
 }
 
@@ -104,5 +106,9 @@ public class SnapshotInfo {
 public long getModificationTime() {
   return modificationTime;
 }
+
+public String getStatus() {
+  return status;
+}
   }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 7569fc6..3866125 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -746,16 +746,19 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
 d.getDirectorySnapshottableFeatu
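The reworked SnapshotInfo.Bean above now takes the numeric snapshot id and a deleted flag, and exposes the flag as a status string. A tiny sketch using the constructor exactly as shown in the diff; the path and timestamp are illustrative values.

    // Sketch: the JMX bean reports "DELETED" for snapshots marked as deleted,
    // and "ACTIVE" otherwise (see the constructor and getStatus() above).
    SnapshotInfo.Bean bean = new SnapshotInfo.Bean(
        1, "/data/.snapshot/s1", System.currentTimeMillis(), true);
    System.out.println(bean.getSnapshotID() + " " + bean.getStatus()); // 1 DELETED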

+  this.status = isMarkedAsDeleted ? "DELETED" : "ACTIVE";
 }
 
-public String getSnapshotID() {
+public int getSnapshotID() {
   return snapshotID;
 }
 
@@ -104,5 +106,9 @@ public class SnapshotInfo {
 public long getModificationTime() {
   return modificationTime;
 }
+
+public String getStatus() {
+  return status;
+}
   }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 7569fc6..3866125 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -746,16 +746,19 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
 d.getDirectorySnapshottableFeatu
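
The SnapshotInfo.Bean change in this commit is split across two hunks above; pieced together, the reworked JMX bean looks roughly like the sketch below (assembled from the diff context, not the verbatim source; accessors outside the shown hunks are omitted).

  // Sketch of SnapshotInfo.Bean after this patch: the snapshot ID becomes an
  // int and the deletion flag is surfaced as a human-readable status string.
  public static class Bean {
    private final int snapshotID;
    private final String snapshotDirectory;
    private final long modificationTime;
    private final String status;

    public Bean(int snapshotID, String snapshotDirectory,
        long modificationTime, boolean isMarkedAsDeleted) {
      this.snapshotID = snapshotID;
      this.snapshotDirectory = snapshotDirectory;
      this.modificationTime = modificationTime;
      // Deleted snapshots report "DELETED" so the new UI column can
      // distinguish them from live ("ACTIVE") snapshots.
      this.status = isMarkedAsDeleted ? "DELETED" : "ACTIVE";
    }

    public int getSnapshotID() {
      return snapshotID;
    }

    public long getModificationTime() {
      return modificationTime;
    }

    public String getStatus() {
      return status;
    }
  }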

[hadoop] branch trunk updated (aee3b97 -> 15a76e8)

2020-08-14 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from aee3b97  HADOOP-17206. Add python2 to required package on CentOS 8 for 
building documentation. (#2227)
 add 15a76e8  HDFS-15524. Add edit log entry for Snapshot deletion GC 
thread snapshot deletion. (#2219)

No new revisions were added by this update.

Summary of changes:
 .../hdfs/server/namenode/FSDirSnapshotOp.java  | 10 ++--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../snapshot/TestOrderedSnapshotDeletionGc.java| 53 --
 3 files changed, 57 insertions(+), 8 deletions(-)





[hadoop] branch trunk updated: HDFS-15500. In-order deletion of snapshots: Diff lists must be update only in the last snapshot. (#2233)

2020-08-27 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 41182a9  HDFS-15500. In-order deletion of snapshots: Diff lists must 
be update only in the last snapshot. (#2233)
41182a9 is described below

commit 41182a9b6d81d0c8a4dc0a9cf89ea0ade815afd3
Author: Tsz-Wo Nicholas Sze 
AuthorDate: Thu Aug 27 02:24:52 2020 -0700

HDFS-15500. In-order deletion of snapshots: Diff lists must be update only 
in the last snapshot. (#2233)
---
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java |  6 ++
 .../hdfs/server/namenode/snapshot/AbstractINodeDiffList.java |  6 +-
 .../hdfs/server/namenode/snapshot/DiffListByArrayList.java   |  2 ++
 .../namenode/snapshot/DirectoryWithSnapshotFeature.java  |  2 ++
 .../hdfs/server/namenode/snapshot/SnapshotManager.java   | 12 
 5 files changed, 27 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 15bf6b1..badf237 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1569,6 +1569,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   // null in some unit tests
   haContext.checkOperation(op);
 }
+
+boolean assertsEnabled = false;
+assert assertsEnabled = true; // Intentional side effect!!!
+if (assertsEnabled && op == OperationCategory.WRITE) {
+  getSnapshotManager().initThreadLocals();
+}
   }
   
   /**
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 776adf1..16e3b75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -76,7 +76,11 @@ abstract class AbstractINodeDiffList>
 
   @Override
   public T remove(int i) {
+// DeletionOrdered: only can remove the element at index 0
+assert !SnapshotManager.isDeletionOrdered() || i == 0;
 return list.remove(i);
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index b8f7b65..c3a9aa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -175,6 +175,8 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 final INode.ReclaimContext reclaimContext,
 final INodeDirectory currentDir,
 final DirectoryDiff posterior) {
+  // DeletionOrdered: must not combine posterior
+  assert !SnapshotManager.isDeletionOrdered();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
 /** Collect blocks for deleted files. */
 @Override
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index b5b0971..789fa3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -95,6 +95,18 @@ public class SnapshotManager implements SnapshotStatsMXBean {
   static final long DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_GC_PERIOD_MS_DEFAULT
   = 5 * 60_000L; //5 minutes
 
+  private static final ThreadLocal<Boolean> DELETION_ORDERED
+  = new ThreadLocal<>();
+
+  static boolean isDeletionOrdered() {
+final Boolean b = DELETION_ORDERED.get();
+return b != null? b: false;
+  }
+
+  public void initThreadLocals() {
+DELETION_ORDERED.set(isSnapshotDeletionOrdered());
+  }
+
   private final FSDirectory fsdir;
   private boolean captureOpenFiles;
   /**


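
The FSNamesystem hunk in this commit uses a small Java idiom to invoke the new SnapshotManager#initThreadLocals() only when assertions are enabled. A minimal standalone sketch of that idiom (the class and method names here are illustrative, not part of the patch):

  public class AssertDetectionDemo {
    public static void main(String[] args) {
      boolean assertsEnabled = false;
      // The assignment inside the assert runs only when the JVM is started
      // with -ea, so the flag ends up true exactly when assertions are on.
      assert assertsEnabled = true; // intentional side effect
      System.out.println("assertions enabled: " + assertsEnabled);
    }
  }

Run with "java -ea AssertDetectionDemo" and the flag prints true; without -ea it stays false, so the DeletionOrdered bookkeeping above is skipped entirely in normal production runs.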

[hadoop] branch trunk updated: HDFS-15542. Add identified snapshot corruption tests for ordered snapshot deletion (#2251)

2020-08-30 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2d03209  HDFS-15542. Add identified snapshot corruption tests for 
ordered snapshot deletion (#2251)
2d03209 is described below

commit 2d03209a06df361e4d224795ed4dce8f9414d70f
Author: bshashikant 
AuthorDate: Mon Aug 31 11:29:48 2020 +0530

HDFS-15542. Add identified snapshot corruption tests for ordered snapshot 
deletion (#2251)
---
 .../TestFSImageWithOrderedSnapshotDeletion.java| 495 +
 1 file changed, 495 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
new file mode 100644
index 000..dec28e4
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.*;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.event.Level;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import static 
org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test FSImage correctness with ordered snapshot deletion.
+ */
+public class TestFSImageWithOrderedSnapshotDeletion {
+  {
+SnapshotTestHelper.disableLogs();
+GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE);
+  }
+
+  static final long SEED = 0;
+  static final short NUM_DATANODES = 1;
+  static final int BLOCKSIZE = 1024;
+
+  private final Path dir = new Path("/TestSnapshot");
+  private static final String TEST_DIR =
+  GenericTestUtils.getTestDir().getAbsolutePath();
+
+  Configuration conf;
+  MiniDFSCluster cluster;
+  FSNamesystem fsn;
+  DistributedFileSystem hdfs;
+
+  @Before
+  public void setUp() throws Exception {
+conf = new Configuration();
+conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES)
+.build();
+cluster.waitActive();
+fsn = cluster.getNamesystem();
+hdfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+if (cluster != null) {
+  cluster.shutdown();
+  cluster = null;
+}
+  }
+
+  void rename(Path src, Path dst) throws Exception {
+printTree("Before rename " + src + " -> " + dst);
+hdfs.rename(src, dst);
+printTree("After rename " + src + " -> " + dst);
+  }
+
+  void createFile(Path directory, String filename) throws Exception {
+final Path f = new Path(directory, filename);
+DFSTestUtil.createFile(hdfs, f, 0, NUM_DATANODES, SEED);
+  }
+
+  void appendFile(Path directory, String filename) throws Exception {
+final Path f = new Path(directory, filename);
+DFSTestUtil.appendFile(hdfs, f, "more data");
+printTree("appended " + f);
+  }
+
+  void deleteSnapshot(Path directory, String snapshotName) throws Exception {
+hdfs.deleteSnapshot(directory, snapshotName);
+printTree("deleted snapshot " + snapshotName);
+  }
+
+  @Test (timeout=6)
+  public

[hadoop] branch trunk updated (e5fe326 -> 43c52d6)

2020-09-10 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from e5fe326  HADOOP-17165. Implement service-user feature in 
DecayRPCScheduler. (#2240)
 add 43c52d6  HDFS-15563. Incorrect getTrashRoot return value when a 
non-snapshottable dir prefix matches the path of a snapshottable dir (#2295)

No new revisions were added by this update.

Summary of changes:
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java|  3 +++
 .../java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java | 10 ++
 2 files changed, 13 insertions(+)





[hadoop] branch trunk updated: HDFS-15568. namenode start failed to start when dfs.namenode.max.snapshot.limit set. (#2296)

2020-09-17 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 425f487  HDFS-15568. namenode start failed to start when 
dfs.namenode.max.snapshot.limit set. (#2296)
425f487 is described below

commit 425f48799c0666ef6acd6dab0d5299eb86a0ed44
Author: bshashikant 
AuthorDate: Thu Sep 17 14:50:08 2020 +0530

HDFS-15568. namenode start failed to start when 
dfs.namenode.max.snapshot.limit set. (#2296)
---
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  8 +++
 .../hdfs/server/namenode/INodeDirectory.java   |  9 ++-
 .../snapshot/DirectorySnapshottableFeature.java| 17 +++---
 .../server/namenode/snapshot/SnapshotManager.java  | 48 
 .../namenode/snapshot/TestSnapshotManager.java | 66 +-
 5 files changed, 122 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ae1afb4..03d696e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -509,6 +509,14 @@ public class FSDirectory implements Closeable {
   }
 
   /**
+   * Indicates whether the image loading is complete or not.
+   * @return true if image loading is complete, false otherwise
+   */
+  public boolean isImageLoaded() {
+return namesystem.isImageLoaded();
+  }
+
+  /**
* Parse configuration setting dfs.namenode.protected.directories to
* retrieve the set of protected directories.
*
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 3836d79..0373312 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -283,12 +283,11 @@ public class INodeDirectory extends 
INodeWithAdditionalFields
* @param name Name of the snapshot.
* @param mtime The snapshot creation time set by Time.now().
*/
-  public Snapshot addSnapshot(int id, String name,
-  final LeaseManager leaseManager, final boolean captureOpenFiles,
-  int maxSnapshotLimit, long mtime)
+  public Snapshot addSnapshot(SnapshotManager snapshotManager, String name,
+  final LeaseManager leaseManager, long mtime)
   throws SnapshotException {
-return getDirectorySnapshottableFeature().addSnapshot(this, id, name,
-leaseManager, captureOpenFiles, maxSnapshotLimit, mtime);
+return getDirectorySnapshottableFeature().addSnapshot(this,
+snapshotManager, name, leaseManager, mtime);
   }
 
   /**
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 3bf2df4..7a47ab4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -172,28 +172,25 @@ public class DirectorySnapshottableFeature extends 
DirectoryWithSnapshotFeature
   /**
* Add a snapshot.
* @param snapshotRoot Root of the snapshot.
+   * @param snapshotManager SnapshotManager Instance.
* @param name Name of the snapshot.
* @param leaseManager
-   * @param captureOpenFiles
* @throws SnapshotException Throw SnapshotException when there is a snapshot
*   with the same name already exists or snapshot quota exceeds
*/
-  public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name,
-  final LeaseManager leaseManager, final boolean captureOpenFiles,
-  int maxSnapshotLimit, long now)
+  public Snapshot addSnapshot(INodeDirectory snapshotRoot,
+  SnapshotManager snapshotManager, String name,
+  final LeaseManager leaseManager, long now)
   throws SnapshotException {
+int id = snapshotManager.getSnapshotCounter();
 //check snapshot quota
 final int n = getNumSnapshots();
 if (n + 1 > snapshotQuota) {
   throw new SnapshotException("Failed to add snapshot: there are already "
   + n + " snapshot(s) and the

[hadoop] branch trunk updated: HDFS-15590. namenode fails to start when ordered snapshot deletion feature is disabled (#2326)

2020-09-24 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 368f2f6  HDFS-15590. namenode fails to start when ordered snapshot 
deletion feature is disabled (#2326)
368f2f6 is described below

commit 368f2f637e8dfeecdda8db2dbb1445beac053ac2
Author: bshashikant 
AuthorDate: Thu Sep 24 14:00:41 2020 +0530

HDFS-15590. namenode fails to start when ordered snapshot deletion feature 
is disabled (#2326)
---
 .../snapshot/DirectorySnapshottableFeature.java  | 18 ++
 .../server/namenode/snapshot/SnapshotManager.java|  9 ++---
 .../org/apache/hadoop/hdfs/TestSnapshotCommands.java |  4 ++--
 .../snapshot/TestOrderedSnapshotDeletion.java| 20 
 4 files changed, 46 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 7a47ab4..8a215b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -241,6 +241,24 @@ public class DirectorySnapshottableFeature extends 
DirectoryWithSnapshotFeature
   throws SnapshotException {
 final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
 if (i < 0) {
+  // considering a sequence like this with snapshots S1 and s2
+  // 1. Ordered snapshot deletion feature is turned on
+  // 2. Delete S2 creating edit log entry for S2 deletion
+  // 3. Delete S1
+  // 4. S2 gets deleted by snapshot gc thread creating edit log record for
+  //S2 deletion again
+  // 5. Disable Ordered snapshot deletion feature
+  // 6. Restarting Namenode
+  // In this case, when edit log replay happens actual deletion of S2
+  // will happen when first edit log for S2 deletion gets replayed and
+  // the second edit log record replay for S2 deletion will fail as 
snapshot
+  // won't exist thereby failing the Namenode start
+  // The idea here is to check during edit log replay, if a certain 
snapshot
+  // is not found and the ordered snapshot deletion is off, ignore the 
error
+  if (!snapshotManager.isSnapshotDeletionOrdered() &&
+  !snapshotManager.isImageLoaded()) {
+return null;
+  }
   throw new SnapshotException("Cannot delete snapshot " + snapshotName
   + " from path " + snapshotRoot.getFullPathName()
   + ": the snapshot does not exist.");
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 04d6b71..2c183f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -479,10 +479,10 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
   void checkSnapshotLimit(int limit, int snapshotCount, String type)
   throws SnapshotException {
 if (snapshotCount >= limit) {
-  String msg = "there are already " + (snapshotCount + 1)
+  String msg = "there are already " + snapshotCount
   + " snapshot(s) and the "  + type + " snapshot limit is "
   + limit;
-  if (fsdir.isImageLoaded()) {
+  if (isImageLoaded()) {
 // We have reached the maximum snapshot limit
 throw new SnapshotException(
 "Failed to create snapshot: " + msg);
@@ -492,7 +492,10 @@ public class SnapshotManager implements 
SnapshotStatsMXBean {
   }
 }
   }
-  
+
+  boolean isImageLoaded() {
+return fsdir.isImageLoaded();
+  }
   /**
* Delete a snapshot for a snapshottable directory
* @param snapshotName Name of the snapshot to be deleted
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
index 2b5a69d..32ac298 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
@@ -128,8 +128,8 @@ public class TestSnapshotComm

[hadoop] branch trunk updated: HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)

2020-10-06 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 16aea11  HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)
16aea11 is described below

commit 16aea11c945c84936984e80241dcdd4a0d4b7f58
Author: bshashikant 
AuthorDate: Wed Oct 7 10:34:32 2020 +0530

HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)
---
 .../hadoop/hdfs/protocol/SnapshotStatus.java   | 20 -
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java | 36 +
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 14 
 .../hadoop/hdfs/web/resources/GetOpParam.java  |  3 +-
 .../hadoop/fs/http/client/HttpFSFileSystem.java| 16 +++-
 .../apache/hadoop/fs/http/server/FSOperations.java | 38 +
 .../fs/http/server/HttpFSParametersProvider.java   |  1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |  8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 51 +++-
 .../hadoop/fs/http/server/TestHttpFSServer.java| 47 +++
 .../web/resources/NamenodeWebHdfsMethods.java  |  7 ++
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  | 24 ++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 92 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java| 61 ++
 14 files changed, 412 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
index 3e2a7ae..8f10db8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
@@ -73,9 +73,18 @@ public class SnapshotStatus {
 this.parentFullPath = parentFullPath;
   }
 
+  public SnapshotStatus(HdfsFileStatus dirStatus,
+  int snapshotID, boolean isDeleted,
+  byte[] parentFullPath) {
+this.dirStatus = dirStatus;
+this.snapshotID = snapshotID;
+this.isDeleted = isDeleted;
+this.parentFullPath = parentFullPath;
+  }
+
   /**
-   * sets the prent path name.
-   * @param path parent path
+   * sets the path name.
+   * @param path path
*/
   public void setParentFullPath(byte[] path) {
 parentFullPath = path;
@@ -174,7 +183,7 @@ public class SnapshotStatus {
 return Math.max(n, String.valueOf(value).length());
   }
 
-  static String getSnapshotPath(String snapshottableDir,
+  public static String getSnapshotPath(String snapshottableDir,
 String snapshotRelativePath) {
 String parentFullPathStr =
 snapshottableDir == null || snapshottableDir.isEmpty() ?
@@ -188,4 +197,9 @@ public class SnapshotStatus {
 .append(snapshotRelativePath)
 .toString();
   }
+
+  public static String getParentPath(String snapshotPath) {
+int index = snapshotPath.indexOf(HdfsConstants.DOT_SNAPSHOT_DIR);
+return index == -1 ? snapshotPath : snapshotPath.substring(0, index - 1);
+  }
 }
\ No newline at end of file
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index e846b56..6ce01a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -872,4 +873,39 @@ public class JsonUtilClient {
 snapshotQuota, parentFullPath);
 return snapshottableDirectoryStatus;
   }
+
+  public static SnapshotStatus[] toSnapshotList(final Map<?, ?> json) {
+if (json == null) {
+  return null;
+}
+List<?> list = (List<?>) json.get("SnapshotList");
+if (list == null) {
+  return null;
+}
+SnapshotStatus[] statuses =
+new SnapshotStatus[list.size()];
+for (int i = 0; i < list.size(); i++) {
+  statuses[i] = toSnapshotStatus((Map<?, ?>) list.get(i));
+}
+return statuses;
+  }
+
+  private static SnapshotStatus toSnapshotStatus(
+ 
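
The SnapshotStatus hunk earlier in this message makes getSnapshotPath public and adds getParentPath, which cuts a snapshot path back at its ".snapshot" component. A hedged illustration of the pair is below; the expected outputs assume the standard "<dir>/.snapshot/<name>" layout and are not taken verbatim from the patch.

  import org.apache.hadoop.hdfs.protocol.SnapshotStatus;

  public class SnapshotPathDemo {
    public static void main(String[] args) {
      // Build the full path of snapshot "s1" under a snapshottable directory.
      String snapPath = SnapshotStatus.getSnapshotPath("/data/dir", "s1");
      System.out.println(snapPath);  // expected: /data/dir/.snapshot/s1

      // Recover the snapshottable directory from a snapshot path by cutting
      // at the ".snapshot" component, as getParentPath does above.
      String parent = SnapshotStatus.getParentPath("/data/dir/.snapshot/s1");
      System.out.println(parent);    // expected: /data/dir
    }
  }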

[hadoop] branch trunk updated (ad1d409 -> 356ebbb)

2021-09-12 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ad1d409  YARN-10928. Support default queue config for 
minimum-user-limit-percent/user-limit-factor (#3389)
 add 356ebbb  HDFS-16187. SnapshotDiff behaviour with Xattrs and Acls is 
not consistent across NN restarts with checkpointing (#3340)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hdfs/server/namenode/XAttrFeature.java  | 17 +++
 .../hdfs/server/namenode/snapshot/Snapshot.java| 15 ++
 .../apache/hadoop/hdfs/TestEncryptionZones.java| 34 ++
 .../namenode/snapshot/TestXAttrWithSnapshot.java   | 26 +
 4 files changed, 87 insertions(+), 5 deletions(-)



