[hadoop] branch HDDS-1564 updated: HDDS-2089: Add createPipeline CLI. (#1418)

2019-09-12 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch HDDS-1564
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDDS-1564 by this push:
 new 326b5ac  HDDS-2089: Add createPipeline CLI. (#1418)
326b5ac is described below

commit 326b5acd4a63fe46821919322867f5daff30750c
Author: Li Cheng 
AuthorDate: Fri Sep 13 07:01:16 2019 +0800

HDDS-2089: Add createPipeline CLI. (#1418)
---
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |  1 +
 ...inerLocationProtocolServerSideTranslatorPB.java | 10 ++-
 .../hdds/scm/pipeline/SimplePipelineProvider.java  |  2 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  8 +--
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java |  2 +
 .../scm/cli/pipeline/CreatePipelineSubcommand.java | 71 ++
 6 files changed, 87 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
index b3b4879..d60848c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
@@ -31,6 +31,7 @@ public enum SCMAction implements AuditAction {
   GET_CONTAINER,
   GET_CONTAINER_WITH_PIPELINE,
   LIST_CONTAINER,
+  CREATE_PIPELINE,
   LIST_PIPELINE,
   CLOSE_PIPELINE,
   DELETE_CONTAINER,
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 30ef7ea..3ac1582 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -234,8 +234,14 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
   public PipelineResponseProto allocatePipeline(
   RpcController controller, PipelineRequestProto request)
   throws ServiceException {
-// TODO : Wiring this up requires one more patch.
-return null;
+try (Scope scope = TracingUtil
+.importAndCreateScope("createPipeline", request.getTraceID())) {
+  impl.createReplicationPipeline(request.getReplicationType(),
+  request.getReplicationFactor(), request.getNodePool());
+  return PipelineResponseProto.newBuilder().build();
+} catch (IOException e) {
+  throw new ServiceException(e);
+}
   }
 
   @Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
index ab98dfa..54e2141 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
@@ -48,7 +48,7 @@ public class SimplePipelineProvider implements PipelineProvider {
   String e = String
   .format("Cannot create pipeline of factor %d using %d nodes.",
   factor.getNumber(), dns.size());
-  throw new IOException(e);
+  throw new InsufficientDatanodesException(e);
 }
 
 Collections.shuffle(dns);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index bf75fef..e838471 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -390,10 +390,10 @@ public class SCMClientProtocolServer implements
   public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
   HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
   throws IOException {
-// TODO: will be addressed in future patch.
-// This is needed only for debugging purposes to make sure cluster is
-// working correctly.
-return null;
+Pipeline result = scm.getPipelineManager().createPipeline(type, factor);
+AUDIT.logWriteSuccess(
+buildAuditMessageForSuccess(SCMAction.CREATE_PIPELINE, null));
+return result;
   }
 
   @Override
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
index 3e8f3fa..5143aec 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.jav
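
The wired-up allocatePipeline above follows the usual server-side translator shape in this patch: import the caller's trace ID into a local tracing scope, delegate to the SCM implementation, and surface any IOException to the RPC layer as a ServiceException. A minimal sketch of that shape, using a hypothetical PipelineHandler interface in place of the real StorageContainerLocationProtocol implementation (the sketch is illustrative, not the patch code):

    import java.io.IOException;
    import com.google.protobuf.ServiceException;
    import io.opentracing.Scope;
    import org.apache.hadoop.hdds.tracing.TracingUtil;

    // Hypothetical stand-in for the "impl" the translator delegates to.
    interface PipelineHandler {
      void createReplicationPipeline(String type, int factor) throws IOException;
    }

    final class PipelineRpcTranslator {
      private final PipelineHandler impl;

      PipelineRpcTranslator(PipelineHandler impl) {
        this.impl = impl;
      }

      // Same pattern as the new allocatePipeline(): a trace scope around the
      // call, and IOException re-thrown as ServiceException.
      void allocatePipeline(String traceId, String type, int factor)
          throws ServiceException {
        try (Scope scope =
            TracingUtil.importAndCreateScope("createPipeline", traceId)) {
          impl.createReplicationPipeline(type, factor);
        } catch (IOException e) {
          throw new ServiceException(e);
        }
      }
    }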

[hadoop] branch trunk updated: HDFS-14754. Erasure Coding : The number of Under-Replicated Blocks never reduced. Contributed by hemanthboyina.

2019-09-12 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4852a90  HDFS-14754. Erasure Coding : The number of Under-Replicated 
Blocks never reduced. Contributed by  hemanthboyina.
4852a90 is described below

commit 4852a90e4b077ece2d68595210e62959a9923683
Author: Surendra Singh Lilhore 
AuthorDate: Fri Sep 13 00:17:13 2019 +0530

HDFS-14754. Erasure Coding : The number of Under-Replicated Blocks never 
reduced. Contributed by  hemanthboyina.
---
 .../hdfs/server/blockmanagement/BlockManager.java  |   3 +-
 .../hdfs/server/namenode/TestRedudantBlocks.java   | 137 +
 2 files changed, 138 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3f718a0..6c349ffd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3437,8 +3437,7 @@ public class BlockManager implements BlockStatsMXBean {
   private boolean shouldProcessExtraRedundancy(NumberReplicas num,
   int expectedNum) {
 final int numCurrent = num.liveReplicas();
-return numCurrent > expectedNum ||
-(numCurrent == expectedNum && num.redundantInternalBlocks() > 0);
+return numCurrent > expectedNum || num.redundantInternalBlocks() > 0;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
new file mode 100644
index 000..943699a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test RedudantBlocks.
+ */
+public class TestRedudantBlocks {
+
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private final Path dirPath = new Path("/striped");
+  private Path filePath = new Path(dirPath, "file");
+  private final ErasureCodingPolicy ecPolicy =
+  SystemErasureCodingPolicies.getPolicies().get(1);
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int blockSize = stripesPerBlock * cellSize;
+  private final int numDNs = groupSize + 1;
+
+  @Before
+  public void setup() throws IOException {
+Configuration conf = new Configuration();
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+// disable block 
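
The one-line change in shouldProcessExtraRedundancy is what unblocks the stuck under-replicated count: for a striped group, a duplicated internal block can coexist with a missing one, so liveReplicas() stays below the expected number while a redundant internal block still needs processing. A toy illustration of the two predicates (the replica counts are made up, not taken from the patch):

    // Toy illustration of the HDFS-14754 predicate change.
    public class RedundancyCheckDemo {

      static boolean oldCheck(int numCurrent, int expectedNum, int redundantInternal) {
        return numCurrent > expectedNum
            || (numCurrent == expectedNum && redundantInternal > 0);
      }

      static boolean newCheck(int numCurrent, int expectedNum, int redundantInternal) {
        return numCurrent > expectedNum || redundantInternal > 0;
      }

      public static void main(String[] args) {
        // e.g. an RS-6-3 group expects 9 internal blocks; one index is missing
        // and another index is stored twice.
        int live = 8, expected = 9, redundant = 1;
        System.out.println("old: " + oldCheck(live, expected, redundant)); // false
        System.out.println("new: " + newCheck(live, expected, redundant)); // true
      }
    }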

[hadoop] branch trunk updated (fe8cdf0 -> 1505d3f)

2019-09-12 Thread gabota
This is an automated email from the ASF dual-hosted git repository.

gabota pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from fe8cdf0  HDDS-2076. Read fails because the block cannot be located in 
the container (#1410)
 add 1505d3f  HADOOP-16566. S3Guard fsck: Use 
org.apache.hadoop.util.StopWatch instead of com.google.common.base.Stopwatch 
(#1433). Contributed by Gabor Bota.

No new revisions were added by this update.

Summary of changes:
 .../main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java| 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
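
The only functional content of this change is swapping Guava's Stopwatch for Hadoop's own org.apache.hadoop.util.StopWatch inside S3GuardFsck. A rough usage sketch of the Hadoop class (the timed sleep is just a stand-in for the scan being measured):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.StopWatch;

    public class StopWatchDemo {
      public static void main(String[] args) throws InterruptedException {
        // org.apache.hadoop.util.StopWatch instead of com.google.common.base.Stopwatch
        StopWatch stopwatch = new StopWatch().start();
        Thread.sleep(100);   // stand-in for the work being timed
        stopwatch.stop();
        System.out.println("took " + stopwatch.now(TimeUnit.MILLISECONDS) + " ms");
      }
    }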



[hadoop] branch ozone-0.4.1 updated: HDDS-2076. Read fails because the block cannot be located in the container (#1410)

2019-09-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new aaee64c  HDDS-2076. Read fails because the block cannot be located in 
the container (#1410)
aaee64c is described below

commit aaee64cfbcdea5e7c6c0e26f405278f5695f0f6b
Author: Shashikant Banerjee 
AuthorDate: Thu Sep 12 21:16:39 2019 +0530

HDDS-2076. Read fails because the block cannot be located in the container 
(#1410)

Signed-off-by: Nanda kumar 
(cherry picked from commit fe8cdf0ab846df9c2f3f59d1d4875185633a27ea)
---
 .../keyvalue/helpers/KeyValueContainerUtil.java|   7 +
 .../rpc/TestContainerReplicationEndToEnd.java  | 215 +
 2 files changed, 222 insertions(+)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index d5487b3..c0e7d78 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -24,12 +24,15 @@ import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -193,6 +196,10 @@ public final class KeyValueContainerUtil {
   }).sum();
   kvContainerData.setBytesUsed(bytesUsed);
   kvContainerData.setKeyCount(liveKeys.size());
+  byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
+  OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+  Preconditions.checkNotNull(bcsId);
+  kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
 }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
new file mode 100644
index 000..e5a3d2f
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.c

[hadoop] branch trunk updated: HDDS-2076. Read fails because the block cannot be located in the container (#1410)

2019-09-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fe8cdf0  HDDS-2076. Read fails because the block cannot be located in 
the container (#1410)
fe8cdf0 is described below

commit fe8cdf0ab846df9c2f3f59d1d4875185633a27ea
Author: Shashikant Banerjee 
AuthorDate: Thu Sep 12 21:16:39 2019 +0530

HDDS-2076. Read fails because the block cannot be located in the container 
(#1410)

Signed-off-by: Nanda kumar 
---
 .../keyvalue/helpers/KeyValueContainerUtil.java|   7 +
 .../rpc/TestContainerReplicationEndToEnd.java  | 215 +
 2 files changed, 222 insertions(+)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index d5487b3..c0e7d78 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -24,12 +24,15 @@ import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -193,6 +196,10 @@ public final class KeyValueContainerUtil {
   }).sum();
   kvContainerData.setBytesUsed(bytesUsed);
   kvContainerData.setKeyCount(liveKeys.size());
+  byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
+  OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
+  Preconditions.checkNotNull(bcsId);
+  kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
 }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
new file mode 100644
index 000..e5a3d2f
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream
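
This push and the ozone-0.4.1 cherry-pick above carry the same fix: when a container is reloaded, its block commit sequence id (bcsId) is now read back from the container's metadata store instead of being left unset, which is the scenario behind the "block cannot be located" read failure. A toy sketch of the decode step, with a plain HashMap standing in for the container DB and an illustrative key string (the real code keys on OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX):

    import java.util.HashMap;
    import java.util.Map;
    import com.google.common.base.Preconditions;
    import com.google.common.primitives.Longs;

    public class BcsIdDemo {
      public static void main(String[] args) {
        // Stand-in for metadata.getStore(); the key name here is illustrative.
        Map<String, byte[]> store = new HashMap<>();
        store.put("#BCSID", Longs.toByteArray(42L));

        byte[] bcsId = store.get("#BCSID");
        // Fail fast if a container has no committed sequence id at all.
        Preconditions.checkNotNull(bcsId);
        long blockCommitSequenceId = Longs.fromByteArray(bcsId);
        System.out.println("container bcsId = " + blockCommitSequenceId);
      }
    }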

[hadoop] branch trunk updated: HDFS-14798. Synchronize invalidateBlocks in DatanodeDescriptor. Contributed by hemanthboyina.

2019-09-12 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2ff2a7f  HDFS-14798. Synchronize invalidateBlocks in 
DatanodeDescriptor. Contributed by  hemanthboyina.
2ff2a7f is described below

commit 2ff2a7f6120079b6a88afff987a551fa3d1f47e2
Author: Surendra Singh Lilhore 
AuthorDate: Thu Sep 12 19:43:18 2019 +0530

HDFS-14798. Synchronize invalidateBlocks in DatanodeDescriptor. Contributed 
by  hemanthboyina.
---
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
old mode 100644
new mode 100755
index e0d8558..b3e2fa6
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -339,7 +339,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
 
   public void resetBlocks() {
 updateStorageStats(this.getStorageReports(), 0L, 0L, 0, 0, null);
-this.invalidateBlocks.clear();
+synchronized (invalidateBlocks) {
+  this.invalidateBlocks.clear();
+}
 this.volumeFailures = 0;
 // pendingCached, cached, and pendingUncached are protected by the
 // FSN lock.
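
The DatanodeDescriptor change is a lock-the-collection fix: the other accessors of invalidateBlocks already synchronize on the set itself, and this patch brings the clear() in resetBlocks() under the same monitor. A minimal sketch of the pattern:

    import java.util.HashSet;
    import java.util.Set;

    // Every access to the shared set synchronizes on the set itself, so a
    // concurrent clear() cannot interleave with add()/size() from other threads.
    public class InvalidateBlocksDemo {
      private final Set<Long> invalidateBlocks = new HashSet<>();

      public void addBlock(long blockId) {
        synchronized (invalidateBlocks) {
          invalidateBlocks.add(blockId);
        }
      }

      public void resetBlocks() {
        synchronized (invalidateBlocks) { // the guard HDFS-14798 adds around clear()
          invalidateBlocks.clear();
        }
      }

      public int numBlocks() {
        synchronized (invalidateBlocks) {
          return invalidateBlocks.size();
        }
      }
    }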



[hadoop] branch branch-3.2 updated: HDFS-14699. Erasure Coding: Storage not considered in live replica when replication streams hard limit reached to threshold. Contributed by Zhao Yi Ming.

2019-09-12 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new eb1ddcd  HDFS-14699. Erasure Coding: Storage not considered in live 
replica when replication streams hard limit reached to threshold. Contributed 
by Zhao Yi Ming.
eb1ddcd is described below

commit eb1ddcd04c9b0457e19fcc3b320d5b86cc1fda64
Author: Surendra Singh Lilhore 
AuthorDate: Thu Sep 12 19:11:50 2019 +0530

HDFS-14699. Erasure Coding: Storage not considered in live replica when 
replication streams hard limit reached to threshold. Contributed by Zhao Yi 
Ming.

(cherry picked from commit d1c303a49763029fffa5164295034af8e81e74a0)
---
 .../hdfs/server/blockmanagement/BlockManager.java  | 24 ---
 .../server/blockmanagement/TestBlockManager.java   | 74 ++
 2 files changed, 90 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 50f6454..99ec1f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2355,6 +2355,22 @@ public class BlockManager implements BlockStatsMXBean {
   && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) {
 continue; // already reached replication limit
   }
+
+  // for EC here need to make sure the numReplicas replicates state correct
+  // because in the scheduleReconstruction it need the numReplicas to check
+  // whether need to reconstruct the ec internal block
+  byte blockIndex = -1;
+  if (isStriped) {
+blockIndex = ((BlockInfoStriped) block)
+.getStorageBlockIndex(storage);
+if (!bitSet.get(blockIndex)) {
+  bitSet.set(blockIndex);
+} else if (state == StoredReplicaState.LIVE) {
+  numReplicas.subtract(StoredReplicaState.LIVE, 1);
+  numReplicas.add(StoredReplicaState.REDUNDANT, 1);
+}
+  }
+
   if (node.getNumberOfBlocksToBeReplicated() >= 
replicationStreamsHardLimit) {
 continue;
   }
@@ -2362,15 +2378,7 @@ public class BlockManager implements BlockStatsMXBean {
   if(isStriped || srcNodes.isEmpty()) {
 srcNodes.add(node);
 if (isStriped) {
-  byte blockIndex = ((BlockInfoStriped) block).
-  getStorageBlockIndex(storage);
   liveBlockIndices.add(blockIndex);
-  if (!bitSet.get(blockIndex)) {
-bitSet.set(blockIndex);
-  } else if (state == StoredReplicaState.LIVE) {
-numReplicas.subtract(StoredReplicaState.LIVE, 1);
-numReplicas.add(StoredReplicaState.REDUNDANT, 1);
-  }
 }
 continue;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 3bd41e2..c56293a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -67,6 +68,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetworkTopology;
@@ -686,6 +688,67 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testChooseSrcDatanodesWithDupEC() throws Exception {
+bm.maxReplicationStreams = 4;
+
+long blockId = -9223372036854775776L; // real ec block id
+Block aBlock = new Block(blockId, 0, 0);
+// ec policy
+ECSchema rsSchema = new ECSchema("rs", 3, 2);
+String policyName = "RS-3-2-128k";
+int cellSize = 128 * 1024;
+Erasu

[hadoop] branch trunk updated: HDFS-14699. Erasure Coding: Storage not considered in live replica when replication streams hard limit reached to threshold. Contributed by Zhao Yi Ming.

2019-09-12 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d1c303a  HDFS-14699. Erasure Coding: Storage not considered in live 
replica when replication streams hard limit reached to threshold. Contributed 
by Zhao Yi Ming.
d1c303a is described below

commit d1c303a49763029fffa5164295034af8e81e74a0
Author: Surendra Singh Lilhore 
AuthorDate: Thu Sep 12 19:11:50 2019 +0530

HDFS-14699. Erasure Coding: Storage not considered in live replica when 
replication streams hard limit reached to threshold. Contributed by Zhao Yi 
Ming.
---
 .../hdfs/server/blockmanagement/BlockManager.java  | 24 ---
 .../server/blockmanagement/TestBlockManager.java   | 74 ++
 2 files changed, 90 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8561cfd..3f718a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2351,6 +2351,22 @@ public class BlockManager implements BlockStatsMXBean {
   && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) {
 continue; // already reached replication limit
   }
+
+  // for EC here need to make sure the numReplicas replicates state correct
+  // because in the scheduleReconstruction it need the numReplicas to check
+  // whether need to reconstruct the ec internal block
+  byte blockIndex = -1;
+  if (isStriped) {
+blockIndex = ((BlockInfoStriped) block)
+.getStorageBlockIndex(storage);
+if (!bitSet.get(blockIndex)) {
+  bitSet.set(blockIndex);
+} else if (state == StoredReplicaState.LIVE) {
+  numReplicas.subtract(StoredReplicaState.LIVE, 1);
+  numReplicas.add(StoredReplicaState.REDUNDANT, 1);
+}
+  }
+
   if (node.getNumberOfBlocksToBeReplicated() >= 
replicationStreamsHardLimit) {
 continue;
   }
@@ -2358,15 +2374,7 @@ public class BlockManager implements BlockStatsMXBean {
   if(isStriped || srcNodes.isEmpty()) {
 srcNodes.add(node);
 if (isStriped) {
-  byte blockIndex = ((BlockInfoStriped) block).
-  getStorageBlockIndex(storage);
   liveBlockIndices.add(blockIndex);
-  if (!bitSet.get(blockIndex)) {
-bitSet.set(blockIndex);
-  } else if (state == StoredReplicaState.LIVE) {
-numReplicas.subtract(StoredReplicaState.LIVE, 1);
-numReplicas.add(StoredReplicaState.REDUNDANT, 1);
-  }
 }
 continue;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index de0e1a6..006513c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -67,6 +68,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetworkTopology;
@@ -686,6 +688,67 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testChooseSrcDatanodesWithDupEC() throws Exception {
+bm.maxReplicationStreams = 4;
+
+long blockId = -9223372036854775776L; // real ec block id
+Block aBlock = new Block(blockId, 0, 0);
+// ec policy
+ECSchema rsSchema = new ECSchema("rs", 3, 2);
+String policyName = "RS-3-2-128k";
+int cellSize = 128 * 1024;
+ErasureCodingPolicy ecPolicy =
+new ErasureCodingPolicy(policyName, rsSchema, cellSiz
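
The key move in this fix (and the branch-3.2 pick above) is that the duplicate-index bookkeeping now runs before the replication-streams hard-limit check, so a storage that is later skipped as a reconstruction source still gets its internal block index recorded and, if duplicated, reclassified from LIVE to REDUNDANT. A toy sketch of that BitSet de-duplication (the indices are invented, not from the patch):

    import java.util.BitSet;

    public class DupEcIndexDemo {
      public static void main(String[] args) {
        // Internal block indices reported by the storages of one striped group;
        // index 1 is stored twice and index 3 is missing.
        int[] storageBlockIndices = {0, 1, 1, 2, 4};
        BitSet seen = new BitSet();
        int live = 0, redundant = 0;

        for (int blockIndex : storageBlockIndices) {
          if (!seen.get(blockIndex)) {
            seen.set(blockIndex);   // first copy of this index counts as LIVE
            live++;
          } else {
            redundant++;            // further copies are reclassified as REDUNDANT
          }
        }
        // scheduleReconstruction can now see live=4, redundant=1 for the group
        // even when some of these storages are over the hard limit.
        System.out.println("live=" + live + " redundant=" + redundant);
      }
    }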

[hadoop] branch ozone-0.4.1 updated: HDDS-2075. Tracing in OzoneManager call is propagated with wrong parent (#1415)

2019-09-12 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new d8c98c5  HDDS-2075. Tracing in OzoneManager call is propagated with 
wrong parent (#1415)
d8c98c5 is described below

commit d8c98c5fc138734d51794bda5cf83984628095cd
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Sep 11 20:59:01 2019 +0200

HDDS-2075. Tracing in OzoneManager call is propagated with wrong parent 
(#1415)

(cherry picked from commit 64ed6b177d6b00b22d45576a8517432dc6c03348)
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java| 7 +--
 .../om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java  | 5 ++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 0bbcd03..8e3deb1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -150,8 +150,11 @@ public class RpcClient implements ClientProtocol {
 this.userRights = aclConfig.getUserDefaultRights();
 this.groupRights = aclConfig.getGroupDefaultRights();
 
-this.ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB(
-this.conf, clientId.toString(), ugi);
+this.ozoneManagerClient = TracingUtil.createProxy(
+new OzoneManagerProtocolClientSideTranslatorPB(
+this.conf, clientId.toString(), ugi),
+OzoneManagerProtocol.class, conf
+);
 long scmVersion =
 RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
 InetSocketAddress scmAddress = getScmAddressForClient();
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 094e689..44db898 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -202,10 +202,9 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
 OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
 OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
 
-this.rpcProxy = TracingUtil.createProxy(
+this.rpcProxy =
 createRetryProxy(omFailoverProxyProvider, maxRetries, maxFailovers,
-sleepBase, sleepMax),
-OzoneManagerProtocolPB.class, conf);
+sleepBase, sleepMax);
 this.clientID = clientId;
   }
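
The fix moves the tracing proxy one layer up: RpcClient now wraps the whole OzoneManagerProtocol client with TracingUtil.createProxy, instead of the translator wrapping its low-level OzoneManagerProtocolPB proxy, so traced calls pick up the caller's span as their parent. A rough sketch of the wrapping pattern with a hypothetical protocol interface (GreeterProtocol is not an Ozone API):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.tracing.TracingUtil;

    interface GreeterProtocol {          // hypothetical protocol interface
      String greet(String name);
    }

    public class TracingProxyDemo {
      public static void main(String[] args) {
        GreeterProtocol plain = name -> "hello " + name;
        // Calls made through the returned proxy are traced before being
        // forwarded to the wrapped client object.
        GreeterProtocol traced = TracingUtil.createProxy(
            plain, GreeterProtocol.class, new OzoneConfiguration());
        System.out.println(traced.greet("ozone"));
      }
    }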
 



[hadoop] branch trunk updated: HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 protoc executable (#1429). Contributed by Vinayakumar B.

2019-09-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f4f9f0f  HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 
protoc executable (#1429). Contributed by Vinayakumar B.
f4f9f0f is described below

commit f4f9f0fe4f215e2e1b88b0607102f22388acfe45
Author: Vinayakumar B 
AuthorDate: Thu Sep 12 16:47:54 2019 +0530

HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 protoc 
executable (#1429). Contributed by Vinayakumar B.
---
 dev-support/docker/Dockerfile | 16 
 1 file changed, 16 insertions(+)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index e71e51c..fe38395 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -212,6 +212,22 @@ RUN curl -L -o hugo.deb 
https://github.com/gohugoio/hugo/releases/download/v0.30
 && dpkg --install hugo.deb \
 && rm hugo.deb
 
+##
+# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial)
+# Keep 2.5.0 as well, until 3.7.1 upgrade is complete.
+##
+# hadolint ignore=DL3003
+RUN mkdir -p /opt/protobuf-3.7-src \
+&& curl -L -s -S \
+  
https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz
 \
+  -o /opt/protobuf-3.7.1.tar.gz \
+&& tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C 
/opt/protobuf-3.7-src \
+&& cd /opt/protobuf-3.7-src \
+&& ./configure --prefix=/opt/protobuf-3.7 \
+&& make install \
+&& cd /root \
+&& rm -rf /opt/protobuf-3.7-src
+
 # Add a welcome message and environment checks.
 COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh
 RUN chmod 755 /root/hadoop_env_checks.sh



[hadoop] branch trunk updated: HADOOP-16423. S3Guard fsck: Check metadata consistency between S3 and metadatastore (log) (#1208). Contributed by Gabor Bota.

2019-09-12 Thread gabota
This is an automated email from the ASF dual-hosted git repository.

gabota pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4e273a3  HADOOP-16423. S3Guard fsck: Check metadata consistency 
between S3 and metadatastore (log) (#1208). Contributed by Gabor Bota.
4e273a3 is described below

commit 4e273a31f66013b7c20e8114451f5bc6c741f2cc
Author: Gabor Bota 
AuthorDate: Thu Sep 12 13:12:46 2019 +0200

HADOOP-16423. S3Guard fsck: Check metadata consistency between S3 and 
metadatastore (log) (#1208). Contributed by Gabor Bota.

Change-Id: I6bbb331b6c0a41c61043e482b95504fda8a50596
---
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java|   2 +-
 .../apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java  | 483 
 .../s3a/s3guard/S3GuardFsckViolationHandler.java   | 346 ++
 .../apache/hadoop/fs/s3a/s3guard/S3GuardTool.java  |  97 +++-
 .../fs/s3a/ITestS3GuardOutOfBandOperations.java|  11 +-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java |  34 ++
 .../hadoop/fs/s3a/s3guard/ITestS3GuardFsck.java| 504 +
 .../fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java   |  30 ++
 .../fs/s3a/s3guard/MetadataStoreTestBase.java  |   2 +-
 9 files changed, 1500 insertions(+), 9 deletions(-)

diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 0ce9823..6bdbba3 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1501,7 +1501,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
* is set for this filesystem.
*/
   @VisibleForTesting
-  boolean hasAuthoritativeMetadataStore() {
+  public boolean hasAuthoritativeMetadataStore() {
 return hasMetadataStore() && allowAuthoritativeMetadataStore;
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java
new file mode 100644
index 000..a9925df
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java
@@ -0,0 +1,483 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.InvalidParameterException;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.base.Stopwatch;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toSet;
+
+/**
+ * Main class for the FSCK factored out from S3GuardTool
+ * The implementation uses fixed DynamoDBMetadataStore as the backing store
+ * for metadata.
+ *
+ * Functions:
+ * 
+ *   Checking metadata consistency between S3 and metadatastore
+ * 
+ */
+public class S3GuardFsck {
+  private static final Logger LOG = LoggerFactory.getLogger(S3GuardFsck.class);
+  public static final String ROOT_PATH_STRING = "/";
+
+  private final S3AFileSystem rawFS;
+  private final DynamoDBMetadataStore metadataStore;
+
+  private static final long MOD_TIME_RANGE = 2000L;
+
+  /**
+   * Creates an S3GuardFsck.
+   * @param fs the filesystem to compare to
+   * @param ms metadatastore the metadatastore to compare with (dynamo)
+   */
+  public S3GuardFsck(S3AFileSystem fs, MetadataStore ms)
+  throws InvalidParameterException {
+this.rawFS = fs;
+
+if (ms == null) {
+  throw new I

[hadoop] branch trunk updated: YARN-9816. EntityGroupFSTimelineStore#scanActiveLogs fails when undesired files are present under /ats/active. Contributed by Prabhu Joseph.

2019-09-12 Thread abmodi
This is an automated email from the ASF dual-hosted git repository.

abmodi pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 44850f6  YARN-9816. EntityGroupFSTimelineStore#scanActiveLogs fails 
when undesired files are present under /ats/active. Contributed by Prabhu Joseph.
44850f6 is described below

commit 44850f67848bd6fe5bfc2ebad693da77184053b7
Author: Abhishek Modi 
AuthorDate: Thu Sep 12 12:50:10 2019 +0530

YARN-9816. EntityGroupFSTimelineStore#scanActiveLogs fails when undesired 
files are present under /ats/active. Contributed by Prabhu Joseph.
---
 .../server/timeline/EntityGroupFSTimelineStore.java |  7 ++-
 .../server/timeline/TestEntityGroupFSTimelineStore.java | 17 +
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
index e10eb1b..8f00b85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java
@@ -376,7 +376,12 @@ public class EntityGroupFSTimelineStore extends CompositeService
 AppLogs logs = getAndSetActiveLog(appId, stat.getPath());
 executor.execute(new ActiveLogParser(logs));
   } else {
-logsToScanCount += scanActiveLogs(stat.getPath());
+if (stat.isDirectory()) {
+  logsToScanCount += scanActiveLogs(stat.getPath());
+} else {
+  LOG.warn("Ignoring unexpected file in active directory {}",
+  stat.getPath());
+}
   }
 }
 return logsToScanCount;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java
index 8fcc696..984e157 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java
@@ -511,6 +511,23 @@ public class TestEntityGroupFSTimelineStore extends TimelineStoreTestUtils {
   }
 
   @Test
+  public void testScanActiveLogsWithInvalidFile() throws Exception {
+Path invalidFile = new Path(testActiveDirPath, "invalidfile");
+try {
+  if (!fs.exists(invalidFile)) {
+fs.createNewFile(invalidFile);
+  }
+  store.scanActiveLogs();
+} catch (StackOverflowError error) {
+  Assert.fail("EntityLogScanner crashed with StackOverflowError");
+} finally {
+  if (fs.exists(invalidFile)) {
+fs.delete(invalidFile, false);
+  }
+}
+  }
+
+  @Test
   public void testScanActiveLogsAndMoveToDonePluginRead() throws Exception {
 EntityGroupFSTimelineStore store = null;
 ApplicationId appId =
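
The guard added to scanActiveLogs is the whole fix: only directories are scanned recursively, and a stray file directly under the active directory is logged and skipped rather than recursed into, which is what previously blew the stack (the new test asserts no StackOverflowError). A minimal sketch of that directory-only recursion over FileStatus entries:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ActiveDirScanDemo {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        System.out.println("entries scanned: " + scan(fs, new Path("/tmp")));
      }

      static int scan(FileSystem fs, Path dir) throws IOException {
        int count = 0;
        for (FileStatus stat : fs.listStatus(dir)) {
          if (stat.isDirectory()) {
            count += scan(fs, stat.getPath()); // recurse only into directories
          } else {
            System.err.println("Ignoring unexpected file " + stat.getPath());
          }
          count++;
        }
        return count;
      }
    }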


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org