[hadoop] branch trunk updated: HDDS-1934. TestSecureOzoneCluster may fail due to port conflict (#1254)

2019-08-08 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 88ed1e0  HDDS-1934. TestSecureOzoneCluster may fail due to port conflict (#1254)
88ed1e0 is described below

commit 88ed1e0bfd6652d1803ebae0b3e743316cc8d11e
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Aug 9 06:38:31 2019 +0200

HDDS-1934. TestSecureOzoneCluster may fail due to port conflict (#1254)
---
 .../test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java  | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 247c9d7..853b6a2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
+import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
@@ -271,7 +272,7 @@ public final class TestSecureOzoneCluster {
   public void testSCMSecurityProtocol() throws Exception {
 
 initSCM();
-scm = StorageContainerManager.createSCM(conf);
+scm = HddsTestUtils.getScm(conf);
 //Reads the SCM Info from SCM instance
 try {
   scm.start();
@@ -739,7 +740,7 @@ public final class TestSecureOzoneCluster {
 
 initSCM();
 try {
-  scm = StorageContainerManager.createSCM(conf);
+  scm = HddsTestUtils.getScm(conf);
   scm.start();
   conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false);
   OMStorage omStore = new OMStorage(conf);
@@ -785,7 +786,7 @@ public final class TestSecureOzoneCluster {
 omLogs.clearOutput();
 initSCM();
 try {
-  scm = StorageContainerManager.createSCM(conf);
+  scm = HddsTestUtils.getScm(conf);
   scm.start();
 
   OMStorage omStore = new OMStorage(conf);
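
The fix above swaps StorageContainerManager.createSCM(conf) for HddsTestUtils.getScm(conf), letting the test helper prepare a configuration that does not collide with ports already in use on the build host. A minimal sketch of the general free-port pattern such helpers tend to rely on; it assumes nothing about the real HddsTestUtils internals, and the class below is purely illustrative:

    import java.io.IOException;
    import java.net.ServerSocket;

    public final class FreePorts {
      private FreePorts() { }

      // Ask the OS for an ephemeral port. Once the probe socket is closed,
      // the port number can be written into the service's address keys,
      // which avoids clashes with ports held by other tests on the host.
      public static int pickFreePort() throws IOException {
        try (ServerSocket probe = new ServerSocket(0)) {
          return probe.getLocalPort();
        }
      }
    }

A test would then write "localhost:" + pickFreePort() into the relevant SCM address settings before calling start().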





[hadoop] branch trunk updated: HDDS-1884. Support Bucket ACL operations for OM HA. (#1202)

2019-08-08 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 91f41b7  HDDS-1884. Support Bucket ACL operations for OM HA. (#1202)
91f41b7 is described below

commit 91f41b7d885d7b0f3abf132a5c8e8812fb179330
Author: Bharat Viswanadham 
AuthorDate: Thu Aug 8 21:29:00 2019 -0700

HDDS-1884. Support Bucket ACL operations for OM HA. (#1202)
---
 .../hadoop/ozone/om/exceptions/OMException.java|   5 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java  |  95 ++-
 .../hadoop/ozone/util/BooleanBiFunction.java   |  11 ++
 .../src/main/proto/OzoneManagerProtocol.proto  |   2 +
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 142 
 .../om/ratis/utils/OzoneManagerRatisUtils.java |  15 +-
 .../om/request/bucket/acl/OMBucketAclRequest.java  | 186 +
 .../request/bucket/acl/OMBucketAddAclRequest.java  | 122 ++
 .../bucket/acl/OMBucketRemoveAclRequest.java   | 101 +++
 .../request/bucket/acl/OMBucketSetAclRequest.java  | 100 +++
 .../ozone/om/request/bucket/acl/package-info.java  |  23 +++
 .../hadoop/ozone/om/request/util/ObjectParser.java |  74 
 .../hadoop/ozone/om/request/util/package-info.java |  23 +++
 .../response/bucket/acl/OMBucketAclResponse.java   |  62 +++
 .../ozone/om/response/bucket/acl/package-info.java |  22 +++
 15 files changed, 978 insertions(+), 5 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 78bdb21..1e291ed 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -205,6 +205,9 @@ public class OMException extends IOException {
 
 S3_BUCKET_INVALID_LENGTH,
 
-RATIS_ERROR // Error in Ratis server
+RATIS_ERROR, // Error in Ratis server
+
+INVALID_PATH_IN_ACL_REQUEST // Error code when path name is invalid during
+// acl requests.
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 51cabe6..4d764a5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om.helpers;
 
 
+import java.util.BitSet;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
@@ -30,11 +31,14 @@ import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.Auditable;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+.BucketInfo;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 
 import com.google.common.base.Preconditions;
 
+import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET;
+
 /**
  * A class that encapsulates Bucket Info.
  */
@@ -125,6 +129,95 @@ public final class OmBucketInfo extends WithMetadata implements Auditable {
   }
 
   /**
+   * Add an OzoneAcl to the existing set of ACLs.
+   * @param ozoneAcl the ACL to add
+   * @return true if the acl was added or merged into an existing entry;
+   * false if the requested rights already exist in the acl list.
+   */
+  public boolean addAcl(OzoneAcl ozoneAcl) {
+// Case 1: When we are adding more rights to existing user/group.
+boolean addToExistingAcl = false;
+for(OzoneAcl existingAcl: getAcls()) {
+  if(existingAcl.getName().equals(ozoneAcl.getName()) &&
+  existingAcl.getType().equals(ozoneAcl.getType())) {
+
+BitSet bits = (BitSet) ozoneAcl.getAclBitSet().clone();
+
+// We need to "or" the bits before comparing. Consider an existing acl
+// of 777 and a newly added acl of 444: the requested rights are already
+// present, so a direct equality check would miss that. After or-ing the
+// new bits with the existing ones, an unchanged bit set tells us the
+// acl is already set.
+bits.or(existingAcl.getAclBitSet());
+
+if (bits.equals(existingAcl.getAclBitSet())) {
+  return false;
+} else {
+  existingAcl.getAclBitSet().or(ozoneAcl.getAclBitSet());
+  addToExistingAcl = true;
+  break;
+}
+  }
+}
+
+// Case 2: When a completely new acl is added.
+if(!addToExistingAcl) {
+  getAcls().add(ozoneAcl);
+}
+return true;
+  }
+
+  /**
+  
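
A standalone illustration of the bit-set check used in addAcl above, with made-up permission values and plain java.util.BitSet (independent of the OzoneAcl class):

    import java.util.BitSet;

    public class AclBitsDemo {
      public static void main(String[] args) {
        BitSet existing = new BitSet();    // rights already granted, e.g. read/write/execute
        existing.set(0); existing.set(1); existing.set(2);

        BitSet requested = new BitSet();   // newly requested right, e.g. read only
        requested.set(0);

        // Or the existing bits into a copy of the requested ones; if the result
        // equals the existing set, every requested right was already present,
        // so addAcl can return false without touching the acl list.
        BitSet merged = (BitSet) requested.clone();
        merged.or(existing);
        System.out.println(merged.equals(existing));   // prints true: nothing new to add
      }
    }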

[hadoop] branch trunk updated: HDDS-1863. Freon RandomKeyGenerator even if keySize is set to 0, it returns some random data to key. (#1167)

2019-08-08 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new aa5f445  HDDS-1863. Freon RandomKeyGenerator even if keySize is set to 0, it returns some random data to key. (#1167)
aa5f445 is described below

commit aa5f445fb9d06f9967aadf305fa3cd509a16b982
Author: Bharat Viswanadham 
AuthorDate: Thu Aug 8 15:40:19 2019 -0700

HDDS-1863. Freon RandomKeyGenerator even if keySize is set to 0, it returns some random data to key. (#1167)
---
 .../apache/hadoop/ozone/freon/RandomKeyGenerator.java | 10 ++
 .../hadoop/ozone/freon/TestRandomKeyGenerator.java| 19 +++
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 5198ac3..7cfd1fe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -26,7 +26,6 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
@@ -263,9 +262,7 @@ public final class RandomKeyGenerator implements Callable {
 // Compute the common initial digest for all keys without their UUID
 if (validateWrites) {
   commonInitialMD = DigestUtils.getDigest(DIGEST_ALGORITHM);
-  int uuidLength = UUID.randomUUID().toString().length();
-  keySize = Math.max(uuidLength, keySize);
-  for (long nrRemaining = keySize - uuidLength; nrRemaining > 0;
+  for (long nrRemaining = keySize; nrRemaining > 0;
   nrRemaining -= bufferSize) {
 int curSize = (int)Math.min(bufferSize, nrRemaining);
 commonInitialMD.update(keyValueBuffer, 0, curSize);
@@ -682,7 +679,6 @@ public final class RandomKeyGenerator implements Callable {
 + RandomStringUtils.randomNumeric(5);
 LOG.trace("Adding key: {} in bucket: {} of volume: {}",
 keyName, bucketName, volumeName);
-byte[] randomValue = DFSUtil.string2Bytes(UUID.randomUUID().toString());
 try {
   try (Scope scope = GlobalTracer.get().buildSpan("createKey")
   .startActive(true)) {
@@ -697,12 +693,11 @@ public final class RandomKeyGenerator implements Callable {
 try (Scope writeScope = GlobalTracer.get().buildSpan("writeKeyData")
 .startActive(true)) {
   long keyWriteStart = System.nanoTime();
-  for (long nrRemaining = keySize - randomValue.length;
+  for (long nrRemaining = keySize;
nrRemaining > 0; nrRemaining -= bufferSize) {
 int curSize = (int) Math.min(bufferSize, nrRemaining);
 os.write(keyValueBuffer, 0, curSize);
   }
-  os.write(randomValue);
   os.close();
 
   long keyWriteDuration = System.nanoTime() - keyWriteStart;
@@ -716,7 +711,6 @@ public final class RandomKeyGenerator implements Callable {
 
   if (validateWrites) {
 MessageDigest tmpMD = (MessageDigest) commonInitialMD.clone();
-tmpMD.update(randomValue);
 boolean validate = validationQueue.offer(
 new KeyValidate(bucket, keyName, tmpMD.digest()));
 if (validate) {
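
With the random UUID suffix gone, the payload is exactly keySize bytes streamed from the shared buffer, so a keySize of 0 writes nothing. A minimal sketch of that chunked-write loop; the stream, buffer, and sizes below are illustrative stand-ins rather than the actual Freon fields:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    public class ChunkedWriteDemo {
      public static void main(String[] args) throws IOException {
        byte[] buffer = new byte[4096];   // pre-filled payload buffer
        int bufferSize = buffer.length;
        long keySize = 0;                 // a zero-length key stays empty

        OutputStream os = new ByteArrayOutputStream();
        // Write the key in bufferSize chunks; the loop body never runs when
        // keySize is 0, so no stray bytes are appended to the key.
        for (long nrRemaining = keySize; nrRemaining > 0; nrRemaining -= bufferSize) {
          int curSize = (int) Math.min(bufferSize, nrRemaining);
          os.write(buffer, 0, curSize);
        }
        os.close();
      }
    }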
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
index 748972e..45ea23d 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
@@ -129,6 +129,25 @@ public class TestRandomKeyGenerator {
   }
 
   @Test
+  public void fileWithSizeZero() throws Exception {
+RandomKeyGenerator randomKeyGenerator =
+new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
+randomKeyGenerator.setNumOfVolumes(1);
+randomKeyGenerator.setNumOfBuckets(1);
+randomKeyGenerator.setNumOfKeys(1);
+randomKeyGenerator.setNumOfThreads(1);
+randomKeyGenerator.setKeySize(0);
+randomKeyGenerator.setFactor(ReplicationFactor.THREE);
+randomKeyGenerator.setType(ReplicationType.RATIS);
+randomKeyGenerator.setValidateWrites(true);
+randomKeyGenerator.call();
+Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
+Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
+Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
+Assert.assertEquals(1, 

[hadoop] branch branch-2 updated: HDFS-14696. Backport HDFS-11273 to branch-2 (Move TransferFsImage#doGetUrl function to a Util class) (#1251) Contributed by Siyao Meng.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 15062b6  HDFS-14696. Backport HDFS-11273 to branch-2 (Move TransferFsImage#doGetUrl function to a Util class) (#1251) Contributed by Siyao Meng.
15062b6 is described below

commit 15062b6d2882d162dabaf884933d9625fff5ae5f
Author: Siyao Meng <50227127+smen...@users.noreply.github.com>
AuthorDate: Thu Aug 8 14:55:41 2019 -0700

HDFS-14696. Backport HDFS-11273 to branch-2 (Move TransferFsImage#doGetUrl function to a Util class) (#1251) Contributed by Siyao Meng.
---
 .../hdfs/server/common/HttpGetFailedException.java |  43 
 .../hdfs/server/common/HttpPutFailedException.java |  43 
 .../org/apache/hadoop/hdfs/server/common/Util.java | 261 ++-
 .../server/namenode/EditLogFileInputStream.java|   3 +-
 .../hadoop/hdfs/server/namenode/ImageServlet.java  |  15 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java|   4 +-
 .../hdfs/server/namenode/TransferFsImage.java  | 286 ++---
 7 files changed, 374 insertions(+), 281 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HttpGetFailedException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HttpGetFailedException.java
new file mode 100644
index 000..592bee2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HttpGetFailedException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+
+/**
+ * The exception is thrown when HTTP GET operation has failed.
+ *
+ */
+@InterfaceAudience.Private
+public class HttpGetFailedException extends IOException {
+  private static final long serialVersionUID = 1L;
+  private final int responseCode;
+
+  public HttpGetFailedException(String msg, HttpURLConnection connection)
+  throws IOException {
+super(msg);
+this.responseCode = connection.getResponseCode();
+  }
+
+  public int getResponseCode() {
+return responseCode;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HttpPutFailedException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HttpPutFailedException.java
new file mode 100644
index 000..77a0ee1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HttpPutFailedException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.io.IOException;
+
+
+/**
+ * The exception is thrown when HTTP PUT operation has failed.
+ *
+ */
+@InterfaceAudience.Private
+public class HttpPutFailedException extends IOException {
+  private static final long serialVersionUID = 1L;
+  private final int responseCode;
+
+  public HttpPutFailedException(String msg, int responseCode)
+  throws IOException {
+super(msg);
+this.responseCode = responseCode;
+  }

[hadoop] branch branch-3.1 updated: HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 0aa847e  HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.
0aa847e is described below

commit 0aa847e1585d1396d2713c95e3a37dd6c3605d96
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 8 13:50:30 2019 -0700

HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.

(cherry picked from commit 6ad9a11494c3aea146d7741bf0ad52ce16ad08e6)
(cherry picked from commit 2f2fa3dad6dc62ad0e415fa7892008d47a31ddb4)
---
 .../java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 8d3dc42..a0c7e2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -492,6 +492,10 @@ public class IPCLoggerChannel implements AsyncLogger {
 Preconditions.checkArgument(size >= 0);
 if (queuedEditsSizeBytes + size > queueSizeLimitBytes &&
 queuedEditsSizeBytes > 0) {
+  QuorumJournalManager.LOG.warn("Pending edits to " + IPCLoggerChannel.this
+  + " is going to exceed limit size: " + queueSizeLimitBytes
+  + ", current queued edits size: " + queuedEditsSizeBytes
+  + ", will silently drop " + size + " bytes of edits!");
   throw new LoggerTooFarBehindException();
 }
 queuedEditsSizeBytes += size;





[hadoop] branch branch-3.2 updated: HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2f2fa3d  HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.
2f2fa3d is described below

commit 2f2fa3dad6dc62ad0e415fa7892008d47a31ddb4
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 8 13:50:30 2019 -0700

HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.

(cherry picked from commit 6ad9a11494c3aea146d7741bf0ad52ce16ad08e6)
---
 .../java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 3247476..3a882e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -492,6 +492,10 @@ public class IPCLoggerChannel implements AsyncLogger {
 Preconditions.checkArgument(size >= 0);
 if (queuedEditsSizeBytes + size > queueSizeLimitBytes &&
 queuedEditsSizeBytes > 0) {
+  QuorumJournalManager.LOG.warn("Pending edits to " + IPCLoggerChannel.this
+  + " is going to exceed limit size: " + queueSizeLimitBytes
+  + ", current queued edits size: " + queuedEditsSizeBytes
+  + ", will silently drop " + size + " bytes of edits!");
   throw new LoggerTooFarBehindException();
 }
 queuedEditsSizeBytes += size;





[hadoop] branch trunk updated: HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6ad9a11  HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.
6ad9a11 is described below

commit 6ad9a11494c3aea146d7741bf0ad52ce16ad08e6
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 8 13:50:30 2019 -0700

HDFS-14693. NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. Contributed by Xudong Cao.
---
 .../java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 3247476..3a882e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -492,6 +492,10 @@ public class IPCLoggerChannel implements AsyncLogger {
 Preconditions.checkArgument(size >= 0);
 if (queuedEditsSizeBytes + size > queueSizeLimitBytes &&
 queuedEditsSizeBytes > 0) {
+  QuorumJournalManager.LOG.warn("Pending edits to " + IPCLoggerChannel.this
+  + " is going to exceed limit size: " + queueSizeLimitBytes
+  + ", current queued edits size: " + queuedEditsSizeBytes
+  + ", will silently drop " + size + " bytes of edits!");
   throw new LoggerTooFarBehindException();
 }
 queuedEditsSizeBytes += size;





[hadoop] branch trunk updated: HDFS-14705. Remove unused configuration dfs.min.replication. Contributed by CR Hota.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2265872  HDFS-14705. Remove unused configuration dfs.min.replication. Contributed by CR Hota.
2265872 is described below

commit 2265872c2db98fbaf0cd847af6d12cd4bc76e9b2
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 8 13:48:29 2019 -0700

HDFS-14705. Remove unused configuration dfs.min.replication. Contributed by CR Hota.
---
 .../src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java| 2 --
 1 file changed, 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index aa8afb0..b65301f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -667,7 +667,6 @@ public class TestFileAppend{
 Configuration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
 conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-conf.setInt("dfs.min.replication", 1);
 File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
 .numDataNodes(1).build();
@@ -693,7 +692,6 @@ public class TestFileAppend{
 Configuration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
 conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-conf.setInt("dfs.min.replication", 1);
 
 File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)





[hadoop] branch trunk updated: HDFS-14701. Change Log Level to warn in SlotReleaser. Contributed by Lisheng Sun.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 28a8484  HDFS-14701. Change Log Level to warn in SlotReleaser. Contributed by Lisheng Sun.
28a8484 is described below

commit 28a848412c8239dfc6bd3e42dbbfe711e19bc8eb
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 8 13:46:31 2019 -0700

HDFS-14701. Change Log Level to warn in SlotReleaser. Contributed by Lisheng Sun.
---
 .../org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java   | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 5acac2f..d3eb3ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -202,10 +202,11 @@ public class ShortCircuitCache implements Closeable {
 LOG.trace("{}: released {}", this, slot);
 success = true;
   } catch (IOException e) {
-LOG.error(ShortCircuitCache.this + ": failed to release " +
-"short-circuit shared memory slot " + slot + " by sending " +
-"ReleaseShortCircuitAccessRequestProto to " + path +
-".  Closing shared memory segment.", e);
+LOG.warn(ShortCircuitCache.this + ": failed to release "
++ "short-circuit shared memory slot " + slot + " by sending "
++ "ReleaseShortCircuitAccessRequestProto to " + path
++ ".  Closing shared memory segment. "
++ "DataNode may have been stopped or restarted", e);
   } finally {
 if (success) {
   shmManager.freeSlot(slot);





[hadoop] branch trunk updated: HDFS-14662. Document the usage of the new Balancer "asService" parameter. Contributed by Chen Zhang.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 23f91f6  HDFS-14662. Document the usage of the new Balancer "asService" parameter. Contributed by Chen Zhang.
23f91f6 is described below

commit 23f91f68b817b59d966156edd0b1171155c07742
Author: Wei-Chiu Chuang 
AuthorDate: Thu Aug 8 13:44:58 2019 -0700

HDFS-14662. Document the usage of the new Balancer "asService" parameter. Contributed by Chen Zhang.
---
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md  |  4 
 .../hadoop-hdfs/src/site/markdown/HdfsUserGuide.md | 22 ++
 2 files changed, 26 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index fd77edf..740317f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -278,6 +278,7 @@ Usage:
   [-blockpools ]
   [-idleiterations ]
   [-runDuringUpgrade]
+  [-asService]
 
 | COMMAND\_OPTION | Description |
 |: |: |
@@ -289,6 +290,7 @@ Usage:
 | `-blockpools` \ | The balancer will only run on blockpools included in this list. |
 | `-idleiterations` \ | Maximum number of idle iterations before exit. This overwrites the default idleiterations(5). |
 | `-runDuringUpgrade` | Whether to run the balancer during an ongoing HDFS upgrade. This is usually not desired since it will not affect used space on over-utilized machines. |
+| `-asService` | Run Balancer as a long running service. |
 | `-h`\|`--help` | Display the tool usage and help information and exit. |
 
 Runs a cluster balancing utility. An administrator can simply press Ctrl-C to stop the rebalancing process. See [Balancer](./HdfsUserGuide.html#Balancer) for more details.
@@ -297,6 +299,8 @@ Note that the `blockpool` policy is more strict than the `datanode` policy.
 
 Besides the above command options, a pinning feature is introduced starting from 2.7.0 to prevent certain replicas from getting moved by balancer/mover. This pinning feature is disabled by default, and can be enabled by configuration property "dfs.datanode.block-pinning.enabled". When enabled, this feature only affects blocks that are written to favored nodes specified in the create() call. This feature is useful when we want to maintain the data locality, for applications such as HBase  [...]
 
+If you want to run Balancer as a long-running service, start it with the `-asService` parameter in daemon mode. You can do this with the following command: `hdfs --daemon start balancer -asService`, or just use the sbin/start-balancer.sh script with the parameter `-asService`.
+
 ### `cacheadmin`
 
 Usage:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
index 6f707f6..54a8056 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
@@ -242,6 +242,28 @@ HDFS data might not always be be placed uniformly across the DataNode. One commo
 
 Due to multiple competing considerations, data might not be uniformly placed across the DataNodes. HDFS provides a tool for administrators that analyzes block placement and rebalanaces data across the DataNode. A brief administrator's guide for balancer is available at [HADOOP-1652](https://issues.apache.org/jira/browse/HADOOP-1652).
 
+Balancer supports two modes: run as a tool or as a long-running service:
+
+* In tool mode, it will try to balance the clusters on a best-effort basis and exit when one of the following conditions holds:
+
+    * All clusters are balanced.
+
+    * No bytes are moved for too many iterations (default is 5).
+
+    * No blocks can be moved.
+
+    * A cluster upgrade is in progress.
+
+    * Other errors.
+
+* In service mode, the balancer runs as a long-running daemon service. It works like this:
+
+    * In each round, it tries to balance the cluster until it succeeds or returns on an error.
+
+    * You can configure the interval between rounds; the interval is set by `dfs.balancer.service.interval`.
+
+    * When it encounters unexpected exceptions, it retries several times before stopping the service, as set by `dfs.balancer.service.retries.on.exception`.
+
 For command usage, see [balancer](./HDFSCommands.html#balancer).
 
 Rack Awareness





[hadoop] branch branch-3.1 updated: HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 163fb88  HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.
163fb88 is described below

commit 163fb887a630140bc332d03d9a7e46b29eb1058b
Author: Stephen O'Donnell 
AuthorDate: Thu Aug 8 13:35:58 2019 -0700

HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit b0799148cf6e92be540f5665bb571418b916d789)
(cherry picked from commit a00a3275944bfdfd0849ac50c89e81b1284c8e13)
---
 .../fsdataset/impl/AddBlockPoolException.java  | 23 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java | 18 -
 .../datanode/fsdataset/impl/FsVolumeList.java  |  5 --
 .../fsdataset/impl/TestAddBlockPoolException.java  | 80 ++
 4 files changed, 118 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
index ef63f00..f08577e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
@@ -35,6 +36,28 @@ public class AddBlockPoolException extends RuntimeException {
 this.unhealthyDataDirs = unhealthyDataDirs;
   }
 
+  public AddBlockPoolException() {
+this.unhealthyDataDirs = new ConcurrentHashMap();
+  }
+
+  public void mergeException(AddBlockPoolException e) {
+if (e == null) {
+  return;
+}
+for(FsVolumeSpi v : e.unhealthyDataDirs.keySet()) {
+  // If there is already an exception for this volume, keep the original
+  // exception and discard the new one. It is likely the first
+  // exception caused the second or they were both due to the disk issue
+  if (!unhealthyDataDirs.containsKey(v)) {
+unhealthyDataDirs.put(v, e.unhealthyDataDirs.get(v));
+  }
+}
+  }
+
+  public boolean hasExceptions() {
+return !unhealthyDataDirs.isEmpty();
+  }
+
   public Map getFailingVolumes() {
 return unhealthyDataDirs;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 29ea1de..cf45e8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2784,16 +2784,28 @@ class FsDatasetImpl implements FsDatasetSpi {
   return replica.getVisibleLength();
 }
   }
-  
+
   @Override
   public void addBlockPool(String bpid, Configuration conf)
   throws IOException {
 LOG.info("Adding block pool " + bpid);
+AddBlockPoolException volumeExceptions = new AddBlockPoolException();
 try (AutoCloseableLock lock = datasetLock.acquire()) {
-  volumes.addBlockPool(bpid, conf);
+  try {
+volumes.addBlockPool(bpid, conf);
+  } catch (AddBlockPoolException e) {
+volumeExceptions.mergeException(e);
+  }
   volumeMap.initBlockPool(bpid);
 }
-volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
+try {
+  volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
+} catch (AddBlockPoolException e) {
+  volumeExceptions.mergeException(e);
+}
+if (volumeExceptions.hasExceptions()) {
+  throw volumeExceptions;
+}
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 85b85cf..049654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -203,9 +203,6 @@ 

[hadoop] branch branch-3.2 updated: HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new a00a327  HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.
a00a327 is described below

commit a00a3275944bfdfd0849ac50c89e81b1284c8e13
Author: Stephen O'Donnell 
AuthorDate: Thu Aug 8 13:35:58 2019 -0700

HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit b0799148cf6e92be540f5665bb571418b916d789)
---
 .../fsdataset/impl/AddBlockPoolException.java  | 23 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java | 18 -
 .../datanode/fsdataset/impl/FsVolumeList.java  |  5 --
 .../fsdataset/impl/TestAddBlockPoolException.java  | 80 ++
 4 files changed, 118 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
index ef63f00..f08577e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
@@ -35,6 +36,28 @@ public class AddBlockPoolException extends RuntimeException {
 this.unhealthyDataDirs = unhealthyDataDirs;
   }
 
+  public AddBlockPoolException() {
+this.unhealthyDataDirs = new ConcurrentHashMap();
+  }
+
+  public void mergeException(AddBlockPoolException e) {
+if (e == null) {
+  return;
+}
+for(FsVolumeSpi v : e.unhealthyDataDirs.keySet()) {
+  // If there is already an exception for this volume, keep the original
+  // exception and discard the new one. It is likely the first
+  // exception caused the second or they were both due to the disk issue
+  if (!unhealthyDataDirs.containsKey(v)) {
+unhealthyDataDirs.put(v, e.unhealthyDataDirs.get(v));
+  }
+}
+  }
+
+  public boolean hasExceptions() {
+return !unhealthyDataDirs.isEmpty();
+  }
+
   public Map getFailingVolumes() {
 return unhealthyDataDirs;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 29ea1de..cf45e8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2784,16 +2784,28 @@ class FsDatasetImpl implements FsDatasetSpi {
   return replica.getVisibleLength();
 }
   }
-  
+
   @Override
   public void addBlockPool(String bpid, Configuration conf)
   throws IOException {
 LOG.info("Adding block pool " + bpid);
+AddBlockPoolException volumeExceptions = new AddBlockPoolException();
 try (AutoCloseableLock lock = datasetLock.acquire()) {
-  volumes.addBlockPool(bpid, conf);
+  try {
+volumes.addBlockPool(bpid, conf);
+  } catch (AddBlockPoolException e) {
+volumeExceptions.mergeException(e);
+  }
   volumeMap.initBlockPool(bpid);
 }
-volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
+try {
+  volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
+} catch (AddBlockPoolException e) {
+  volumeExceptions.mergeException(e);
+}
+if (volumeExceptions.hasExceptions()) {
+  throw volumeExceptions;
+}
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 85b85cf..049654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -203,9 +203,6 @@ class FsVolumeList {
 long timeTaken = Time.monotonicNow() - 

[hadoop] branch trunk updated (b079914 -> 14a4ce3)

2019-08-08 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 from b079914  HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.
 add 14a4ce3  HDDS-1829 On OM reload/restart OmMetrics#numKeys should be updated. Contributed by Siyao Meng.

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/utils/db/RDBTable.java   | 10 ++
 .../main/java/org/apache/hadoop/utils/db/Table.java |  7 +++
 .../java/org/apache/hadoop/utils/db/TypedTable.java |  5 +
 .../apache/hadoop/utils/db/TestRDBTableStore.java   | 21 -
 .../hadoop/utils/db/TestTypedRDBTableStore.java | 21 -
 .../apache/hadoop/ozone/om/OMMetadataManager.java   | 11 +++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  | 10 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java|  2 ++
 8 files changed, 85 insertions(+), 2 deletions(-)





[hadoop] branch trunk updated: HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b079914  HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.
b079914 is described below

commit b0799148cf6e92be540f5665bb571418b916d789
Author: Stephen O'Donnell 
AuthorDate: Thu Aug 8 13:35:58 2019 -0700

HDFS-14459. ClosedChannelException silently ignored in FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
---
 .../fsdataset/impl/AddBlockPoolException.java  | 23 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java | 18 -
 .../datanode/fsdataset/impl/FsVolumeList.java  |  5 --
 .../fsdataset/impl/TestAddBlockPoolException.java  | 80 ++
 4 files changed, 118 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
index ef63f00..f08577e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/AddBlockPoolException.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 
 /**
@@ -35,6 +36,28 @@ public class AddBlockPoolException extends RuntimeException {
 this.unhealthyDataDirs = unhealthyDataDirs;
   }
 
+  public AddBlockPoolException() {
+this.unhealthyDataDirs = new ConcurrentHashMap();
+  }
+
+  public void mergeException(AddBlockPoolException e) {
+if (e == null) {
+  return;
+}
+for(FsVolumeSpi v : e.unhealthyDataDirs.keySet()) {
+  // If there is already an exception for this volume, keep the original
+  // exception and discard the new one. It is likely the first
+  // exception caused the second or they were both due to the disk issue
+  if (!unhealthyDataDirs.containsKey(v)) {
+unhealthyDataDirs.put(v, e.unhealthyDataDirs.get(v));
+  }
+}
+  }
+
+  public boolean hasExceptions() {
+return !unhealthyDataDirs.isEmpty();
+  }
+
   public Map getFailingVolumes() {
 return unhealthyDataDirs;
   }
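
The new mergeException() keeps the first exception recorded for a volume and discards later ones. A self-contained illustration of that merge rule, using String paths in place of FsVolumeSpi (the generic types of unhealthyDataDirs are assumed to map a volume to its IOException):

    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class MergeRuleDemo {
      public static void main(String[] args) {
        Map<String, IOException> first = new ConcurrentHashMap<>();
        first.put("/data/1", new IOException("channel closed"));

        Map<String, IOException> second = new ConcurrentHashMap<>();
        second.put("/data/1", new IOException("later failure"));
        second.put("/data/2", new IOException("disk error"));

        // Same containsKey guard as mergeException(): /data/1 keeps its
        // original exception, /data/2 is added to the aggregate.
        for (String volume : second.keySet()) {
          if (!first.containsKey(volume)) {
            first.put(volume, second.get(volume));
          }
        }
        System.out.println(first.get("/data/1").getMessage());  // channel closed
        System.out.println(first.size());                       // 2
      }
    }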
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1ce8603..fb365d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2824,16 +2824,28 @@ class FsDatasetImpl implements FsDatasetSpi {
   return replica.getVisibleLength();
 }
   }
-  
+
   @Override
   public void addBlockPool(String bpid, Configuration conf)
   throws IOException {
 LOG.info("Adding block pool " + bpid);
+AddBlockPoolException volumeExceptions = new AddBlockPoolException();
 try (AutoCloseableLock lock = datasetLock.acquire()) {
-  volumes.addBlockPool(bpid, conf);
+  try {
+volumes.addBlockPool(bpid, conf);
+  } catch (AddBlockPoolException e) {
+volumeExceptions.mergeException(e);
+  }
   volumeMap.initBlockPool(bpid);
 }
-volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
+try {
+  volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
+} catch (AddBlockPoolException e) {
+  volumeExceptions.mergeException(e);
+}
+if (volumeExceptions.hasExceptions()) {
+  throw volumeExceptions;
+}
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 85b85cf..049654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -203,9 +203,6 @@ class FsVolumeList {
 long timeTaken = Time.monotonicNow() - startTime;
 FsDatasetImpl.LOG.info("Time to add replicas to map for 

[hadoop] branch branch-3.0 updated: HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

2019-08-08 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new 4c06b2f  HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.
4c06b2f is described below

commit 4c06b2f96c922f5122a35862a5e3750d30adf10c
Author: Chao Sun 
AuthorDate: Tue Jul 30 15:59:57 2019 -0700

HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

Signed-off-by: Wei-Chiu Chuang 

(cherry picked from 3ae775d74029b6ae82263739f598ceb25c597dcd)
(cherry picked from d38b617baaf10cb35f3a8eba904b930a142a071e)
(cherry picked from 90447baeb0c82d2602282c7369de2b603e16a93f)
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  59 ++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 ++
 .../hadoop/hdfs/web/resources/GetOpParam.java  |   1 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  86 ++--
 .../apache/hadoop/fs/http/server/FSOperations.java |  84 +++-
 .../fs/http/server/HttpFSParametersProvider.java   |   1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |   8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  |  73 --
 .../web/resources/NamenodeWebHdfsMethods.java  |   7 +
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  40 --
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 151 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java|  42 ++
 12 files changed, 513 insertions(+), 54 deletions(-)
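
Once a cluster carries this patch, the new operation is reachable through the ordinary FileSystem API over WebHDFS. A hedged usage sketch; the host, port, and path are placeholders, and it assumes a NameNode that already serves the new quota-usage op added in the diff below:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.QuotaUsage;

    public class QuotaUsageOverWebHdfs {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // webhdfs:// goes over HTTP, which is the code path this patch extends.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:9870"), conf);

        QuotaUsage usage = fs.getQuotaUsage(new Path("/user/example"));
        System.out.println("file and directory count: "
            + usage.getFileAndDirectoryCount());
        System.out.println("space consumed: " + usage.getSpaceConsumed());
      }
    }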

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 91eaae0..093ed02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -408,30 +409,66 @@ class JsonUtilClient {
   return null;
 }
 
-final Map m = (Map)json.get(
-ContentSummary.class.getSimpleName());
+final Map m = (Map)
+json.get(ContentSummary.class.getSimpleName());
 final long length = ((Number) m.get("length")).longValue();
 final long fileCount = ((Number) m.get("fileCount")).longValue();
 final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+ContentSummary.Builder builder = new ContentSummary.Builder()
+.length(length)
+.fileCount(fileCount)
+.directoryCount(directoryCount);
+builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
+return builder.build();
+  }
+
+  /** Convert a JSON map to a QuotaUsage. */
+  static QuotaUsage toQuotaUsage(final Map json) {
+if (json == null) {
+  return null;
+}
+
+final Map m = (Map) json.get(QuotaUsage.class.getSimpleName());
+QuotaUsage.Builder builder = new QuotaUsage.Builder();
+builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
+return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided map and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static  T buildQuotaUsage(
+  T builder, Map m, Class type) {
 final long quota = ((Number) m.get("quota")).longValue();
 final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
 final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 final Map typem = (Map) m.get("typeQuota");
 
-ContentSummary.Builder contentSummaryBuilder =new ContentSummary.Builder()
-.length(length).fileCount(fileCount).directoryCount(directoryCount)
-.quota(quota).spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+T result = type.cast(builder
+.quota(quota)
+.spaceConsumed(spaceConsumed)
+.spaceQuota(spaceQuota));
+
+// ContentSummary doesn't set this so check before using it
+if (m.get("fileAndDirectoryCount") != null) {
+  final long fileAndDirectoryCount =
+  ((Number) m.get("fileAndDirectoryCount")).longValue();
+  result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
+}
+
 if (typem != null) {
   for (StorageType t : StorageType.getTypesSupportingQuota()) {
-Map type = (Map) typem.get(t.toString());
-if (type != null) {
- 

[hadoop] branch trunk updated: HADOOP-16479. ABFS FileStatus.getModificationTime returns localized time instead of UTC.

2019-08-08 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5840df8  HADOOP-16479. ABFS FileStatus.getModificationTime returns localized time instead of UTC.
5840df8 is described below

commit 5840df86d7a9e79007745688313f1d799d89189b
Author: bilaharith 
AuthorDate: Thu Aug 8 19:08:04 2019 +0100

HADOOP-16479. ABFS FileStatus.getModificationTime returns localized time instead of UTC.

Contributed by Bilahari T H

Change-Id: I532055baaadfd7c324710e4b25f60cdf0378bdc0
---
 .../hadoop/fs/azurebfs/AzureBlobFileSystemStore.java   |  2 +-
 .../azurebfs/ITestAzureBlobFileSystemFileStatus.java   | 18 ++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 2402dbc..138339c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -118,7 +118,7 @@ public class AzureBlobFileSystemStore implements Closeable {
   private URI uri;
   private String userName;
   private String primaryUserGroup;
-  private static final String DATE_TIME_PATTERN = "E, dd MMM  HH:mm:ss 'GMT'";
+  private static final String DATE_TIME_PATTERN = "E, dd MMM  HH:mm:ss z";
   private static final String TOKEN_DATE_PATTERN = "-MM-dd'T'HH:mm:ss.SSS'Z'";
   private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1";
   private static final int LIST_MAX_RESULTS = 500;
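
The one-character pattern change is the whole fix: with 'GMT' quoted as a literal, SimpleDateFormat ignores the zone and reads the timestamp in the JVM's default zone; with the z token the zone is parsed, so the resulting instant is true UTC. A small demonstration of the difference, with the year field written out in full here for illustration (the constant above elides it):

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.Locale;
    import java.util.TimeZone;

    public class LastModifiedParseDemo {
      public static void main(String[] args) throws Exception {
        String header = "Thu, 08 Aug 2019 18:08:04 GMT";

        // Old behaviour: 'GMT' is literal text, so the time is interpreted
        // in the formatter's zone (pretend the JVM default is Asia/Kolkata).
        SimpleDateFormat literal =
            new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss 'GMT'", Locale.US);
        literal.setTimeZone(TimeZone.getTimeZone("Asia/Kolkata"));

        // New behaviour: z parses the zone, so the instant is real UTC.
        SimpleDateFormat zoneAware =
            new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss z", Locale.US);

        Date shifted = literal.parse(header);
        Date utc = zoneAware.parse(header);
        System.out.println(shifted.getTime() - utc.getTime());  // -19800000 ms (5.5 h)
      }
    }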
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
index f514696..421fa9a 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
@@ -122,4 +122,22 @@ public class ITestAzureBlobFileSystemFileStatus extends
 assertEquals(pathWithHost2.getName(), fileStatus2.getPath().getName());
   }
 
+  @Test
+  public void testLastModifiedTime() throws IOException {
+AzureBlobFileSystem fs = this.getFileSystem();
+Path testFilePath = new Path("childfile1.txt");
+long createStartTime = System.currentTimeMillis();
+long minCreateStartTime = (createStartTime / 1000) * 1000 - 1;
+//  Dividing and multiplying by 1000 to make last 3 digits 0.
+//  It is observed that modification time is returned with last 3
+//  digits 0 always.
+fs.create(testFilePath);
+long createEndTime = System.currentTimeMillis();
+FileStatus fStat = fs.getFileStatus(testFilePath);
+long lastModifiedTime = fStat.getModificationTime();
+assertTrue("lastModifiedTime should be after minCreateStartTime",
+minCreateStartTime < lastModifiedTime);
+assertTrue("lastModifiedTime should be before createEndTime",
+createEndTime > lastModifiedTime);
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



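A note on the HADOOP-16479 change above: in the old DATE_TIME_PATTERN the trailing 'GMT' is a quoted literal, so SimpleDateFormat never parses a time zone and interprets the instant in the formatter's default zone; the z token makes the zone come from the header itself. A minimal standalone sketch of the difference (the sample header value and the Asia/Kolkata zone are made up for illustration, not taken from the commit):

    import java.text.SimpleDateFormat;
    import java.util.Locale;
    import java.util.TimeZone;

    public class AbfsLastModifiedSketch {
      public static void main(String[] args) throws Exception {
        String header = "Thu, 08 Aug 2019 19:08:04 GMT";  // sample Last-Modified value

        // Old pattern: 'GMT' is a literal, no zone is parsed, so the epoch
        // shifts with whatever zone the formatter happens to use.
        SimpleDateFormat literal =
            new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss 'GMT'", Locale.US);
        literal.setTimeZone(TimeZone.getTimeZone("Asia/Kolkata"));  // stand-in for a non-UTC default

        // New pattern: z parses the zone out of the header itself.
        SimpleDateFormat zoneAware =
            new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss z", Locale.US);

        System.out.println(literal.parse(header).getTime());    // 1565271484000, off by +05:30
        System.out.println(zoneAware.parse(header).getTime());  // 1565291284000, the UTC instant
      }
    }
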
[hadoop] branch branch-3.1 updated: HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

2019-08-08 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 90447ba  HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed 
by Chao Sun.
90447ba is described below

commit 90447baeb0c82d2602282c7369de2b603e16a93f
Author: Chao Sun 
AuthorDate: Tue Jul 30 15:59:57 2019 -0700

HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

Signed-off-by: Wei-Chiu Chuang 

(cherry picked from 3ae775d74029b6ae82263739f598ceb25c597dcd)
(cherry picked from d38b617baaf10cb35f3a8eba904b930a142a071e)
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  59 ++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 ++
 .../hadoop/hdfs/web/resources/GetOpParam.java  |   1 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  86 ++--
 .../apache/hadoop/fs/http/server/FSOperations.java |  84 +++-
 .../fs/http/server/HttpFSParametersProvider.java   |   1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |   8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  |  73 --
 .../web/resources/NamenodeWebHdfsMethods.java  |   7 +
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  40 --
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 151 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java|  42 ++
 12 files changed, 513 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index a685573..458e013 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -423,30 +424,66 @@ class JsonUtilClient {
   return null;
 }
 
-final Map m = (Map)json.get(
-ContentSummary.class.getSimpleName());
+final Map m = (Map)
+json.get(ContentSummary.class.getSimpleName());
 final long length = ((Number) m.get("length")).longValue();
 final long fileCount = ((Number) m.get("fileCount")).longValue();
 final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+ContentSummary.Builder builder = new ContentSummary.Builder()
+.length(length)
+.fileCount(fileCount)
+.directoryCount(directoryCount);
+builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
+return builder.build();
+  }
+
+  /** Convert a JSON map to a QuotaUsage. */
+  static QuotaUsage toQuotaUsage(final Map json) {
+if (json == null) {
+  return null;
+}
+
+final Map m = (Map) json.get(QuotaUsage.class.getSimpleName());
+QuotaUsage.Builder builder = new QuotaUsage.Builder();
+builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
+return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided map and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+  T builder, Map<?, ?> m, Class<T> type) {
 final long quota = ((Number) m.get("quota")).longValue();
 final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
 final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 final Map typem = (Map) m.get("typeQuota");
 
-ContentSummary.Builder contentSummaryBuilder =new ContentSummary.Builder()
-.length(length).fileCount(fileCount).directoryCount(directoryCount)
-.quota(quota).spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+T result = type.cast(builder
+.quota(quota)
+.spaceConsumed(spaceConsumed)
+.spaceQuota(spaceQuota));
+
+// ContentSummary doesn't set this so check before using it
+if (m.get("fileAndDirectoryCount") != null) {
+  final long fileAndDirectoryCount =
+  ((Number) m.get("fileAndDirectoryCount")).longValue();
+  result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
+}
+
 if (typem != null) {
   for (StorageType t : StorageType.getTypesSupportingQuota()) {
-Map type = (Map) typem.get(t.toString());
-if (type != null) {
-  contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
- 

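For HDFS-14034 above, the user-visible effect is that FileSystem#getQuotaUsage is now answered natively by WebHdfsFileSystem. A minimal client sketch, assuming a reachable NameNode; the host, port and path below are placeholders, not values from the commit:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.QuotaUsage;

    public class QuotaUsageOverWebHdfs {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Host, port and path are placeholders; point them at a real NameNode.
        try (FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:9870/"), conf)) {
          QuotaUsage usage = fs.getQuotaUsage(new Path("/user/example"));
          System.out.println("files+dirs = " + usage.getFileAndDirectoryCount());
          System.out.println("nsQuota    = " + usage.getQuota());
          System.out.println("spaceUsed  = " + usage.getSpaceConsumed());
          System.out.println("spaceQuota = " + usage.getSpaceQuota());
        }
      }
    }
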
[hadoop] branch branch-2 updated: HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

2019-08-08 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new ce12c8f  HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed 
by Chao Sun.
ce12c8f is described below

commit ce12c8fc54879ac985e73b104868495c3cdc80f2
Author: Erik Krogen 
AuthorDate: Thu Aug 8 10:18:00 2019 -0700

HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  55 ++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 ++
 .../hadoop/hdfs/web/resources/GetOpParam.java  |   1 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  86 ++--
 .../apache/hadoop/fs/http/server/FSOperations.java |  84 +++-
 .../fs/http/server/HttpFSParametersProvider.java   |   1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |   8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  |  76 +--
 .../web/resources/NamenodeWebHdfsMethods.java  |   7 +
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  50 +--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 151 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java|  42 ++
 12 files changed, 522 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 0320614..1fb7dea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -389,25 +390,61 @@ class JsonUtilClient {
 final long length = ((Number) m.get("length")).longValue();
 final long fileCount = ((Number) m.get("fileCount")).longValue();
 final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+ContentSummary.Builder builder = new ContentSummary.Builder()
+.length(length)
+.fileCount(fileCount)
+.directoryCount(directoryCount);
+builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
+return builder.build();
+  }
+
+  /** Convert a JSON map to a QuotaUsage. */
+  static QuotaUsage toQuotaUsage(final Map json) {
+if (json == null) {
+  return null;
+}
+
+final Map m = (Map) json.get(QuotaUsage.class.getSimpleName());
+QuotaUsage.Builder builder = new QuotaUsage.Builder();
+builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
+return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided map and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+  T builder, Map<?, ?> m, Class<T> type) {
 final long quota = ((Number) m.get("quota")).longValue();
 final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
 final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 final Map typem = (Map) m.get("typeQuota");
 
-Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
-.fileCount(fileCount).directoryCount(directoryCount).quota(quota)
-.spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+T result = type.cast(builder
+.quota(quota)
+.spaceConsumed(spaceConsumed)
+.spaceQuota(spaceQuota));
+
+// ContentSummary doesn't set this so check before using it
+if (m.get("fileAndDirectoryCount") != null) {
+  final long fileAndDirectoryCount =
+  ((Number) m.get("fileAndDirectoryCount")).longValue();
+  result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
+}
+
 if (typem != null) {
   for (StorageType t : StorageType.getTypesSupportingQuota()) {
-Map type = (Map) typem.get(t.toString());
-if (type != null) {
-  contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
-  ((Number) type.get("quota")).longValue()).typeConsumed(t,
-  ((Number) type.get("consumed")).longValue());
+Map typeQuota = (Map) typem.get(t.toString());
+if (typeQuota != null) {
+  result = type.cast(result.typeQuota(t,
+  ((Number) typeQuota.get("quota")).longValue()).typeConsumed(t,
+  ((Number) 

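One detail of the HDFS-14034 patch worth spelling out: buildQuotaUsage needs Class#cast because the shared fluent setters (quota, spaceConsumed, spaceQuota, ...) are declared on the base builder, so their return type loses the caller's concrete builder type. A toy sketch of that pattern; BaseBuilder and ChildBuilder are made-up stand-ins, not the Hadoop classes:

    // Fluent setters declared on the base builder return the base type, so a
    // shared helper has to cast back to the caller's concrete builder type.
    class BaseBuilder {
      long quota;
      BaseBuilder quota(long q) { this.quota = q; return this; }
    }

    class ChildBuilder extends BaseBuilder {
      long length;
      ChildBuilder length(long l) { this.length = l; return this; }
    }

    public class SharedBuilderHelper {
      // Mirrors the shape of buildQuotaUsage: apply the common fields, then
      // hand back the same concrete builder type via Class.cast.
      static <T extends BaseBuilder> T applyCommonFields(T builder, long quota, Class<T> type) {
        return type.cast(builder.quota(quota));
      }

      public static void main(String[] args) {
        ChildBuilder child = applyCommonFields(new ChildBuilder(), 42L, ChildBuilder.class);
        System.out.println(child.length(7L).quota);  // still a ChildBuilder: prints 42
      }
    }
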
[hadoop] branch branch-3.2 updated: HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

2019-08-08 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new d38b617  HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed 
by Chao Sun.
d38b617 is described below

commit d38b617baaf10cb35f3a8eba904b930a142a071e
Author: Chao Sun 
AuthorDate: Tue Jul 30 15:59:57 2019 -0700

HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

Signed-off-by: Wei-Chiu Chuang 

(cherry picked from 3ae775d74029b6ae82263739f598ceb25c597dcd)
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  59 ++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 ++
 .../hadoop/hdfs/web/resources/GetOpParam.java  |   1 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java|  86 ++--
 .../apache/hadoop/fs/http/server/FSOperations.java |  84 +++-
 .../fs/http/server/HttpFSParametersProvider.java   |   1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |   8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  |  73 --
 .../web/resources/NamenodeWebHdfsMethods.java  |   7 +
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  40 --
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 151 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java|  42 ++
 12 files changed, 513 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 3889326..34ad50f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -426,30 +427,66 @@ public class JsonUtilClient {
   return null;
 }
 
-final Map m = (Map)json.get(
-ContentSummary.class.getSimpleName());
+final Map m = (Map)
+json.get(ContentSummary.class.getSimpleName());
 final long length = ((Number) m.get("length")).longValue();
 final long fileCount = ((Number) m.get("fileCount")).longValue();
 final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+ContentSummary.Builder builder = new ContentSummary.Builder()
+.length(length)
+.fileCount(fileCount)
+.directoryCount(directoryCount);
+builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
+return builder.build();
+  }
+
+  /** Convert a JSON map to a QuotaUsage. */
+  static QuotaUsage toQuotaUsage(final Map json) {
+if (json == null) {
+  return null;
+}
+
+final Map m = (Map) json.get(QuotaUsage.class.getSimpleName());
+QuotaUsage.Builder builder = new QuotaUsage.Builder();
+builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
+return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided map and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+  T builder, Map<?, ?> m, Class<T> type) {
 final long quota = ((Number) m.get("quota")).longValue();
 final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
 final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 final Map typem = (Map) m.get("typeQuota");
 
-ContentSummary.Builder contentSummaryBuilder =new ContentSummary.Builder()
-.length(length).fileCount(fileCount).directoryCount(directoryCount)
-.quota(quota).spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+T result = type.cast(builder
+.quota(quota)
+.spaceConsumed(spaceConsumed)
+.spaceQuota(spaceQuota));
+
+// ContentSummary doesn't set this so check before using it
+if (m.get("fileAndDirectoryCount") != null) {
+  final long fileAndDirectoryCount =
+  ((Number) m.get("fileAndDirectoryCount")).longValue();
+  result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
+}
+
 if (typem != null) {
   for (StorageType t : StorageType.getTypesSupportingQuota()) {
-Map type = (Map) typem.get(t.toString());
-if (type != null) {
-  contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
-  ((Number) 

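At the REST level, the same HDFS-14034 change adds a read-only operation documented in the WebHDFS.md file listed in the diff stat. A hedged sketch of calling it directly; the GETQUOTAUSAGE op name is inferred from the new GetOpParam entry, and host, port and path are placeholders. The response body is JSON keyed by "QuotaUsage", matching QuotaUsage.class.getSimpleName() in JsonUtilClient:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class GetQuotaUsageRest {
      public static void main(String[] args) throws Exception {
        // Placeholder NameNode HTTP address and path; op name assumed from GetOpParam.
        URL url = new URL(
            "http://namenode.example.com:9870/webhdfs/v1/user/example?op=GETQUOTAUSAGE");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);  // JSON object keyed by "QuotaUsage"
          }
        } finally {
          conn.disconnect();
        }
      }
    }
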
[hadoop] branch trunk updated: HDDS-1619. Support volume acl operations for OM HA. Contributed by… (#1147)

2019-08-08 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3ac0f3a  HDDS-1619. Support volume acl operations for OM HA. 
Contributed by… (#1147)
3ac0f3a is described below

commit 3ac0f3a0c1d982bfab13de0112b7bae778d19a74
Author: Xiaoyu Yao 
AuthorDate: Thu Aug 8 09:55:46 2019 -0700

HDDS-1619. Support volume acl operations for OM HA. Contributed by… (#1147)
---
 .../om/ratis/utils/OzoneManagerRatisUtils.java |  33 -
 .../om/request/volume/acl/OMVolumeAclRequest.java  | 157 +
 .../request/volume/acl/OMVolumeAddAclRequest.java  | 110 +++
 .../volume/acl/OMVolumeRemoveAclRequest.java   | 109 ++
 .../request/volume/acl/OMVolumeSetAclRequest.java  | 106 ++
 .../ozone/om/request/volume/acl/package-info.java  |  22 +++
 .../om/response/volume/OMVolumeAclOpResponse.java  |  68 +
 .../OzoneManagerHARequestHandlerImpl.java  |  17 ++-
 .../ozone/om/request/TestOMRequestUtils.java   |  65 -
 .../volume/acl/TestOMVolumeAddAclRequest.java  | 123 
 .../volume/acl/TestOMVolumeRemoveAclRequest.java   | 133 +
 .../volume/acl/TestOMVolumeSetAclRequest.java  | 136 ++
 .../ozone/om/request/volume/acl/package-info.java  |  21 +++
 13 files changed, 1093 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index aef189c..460daaa 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -41,9 +41,12 @@ import 
org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetQuotaRequest;
+import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeAddAclRequest;
+import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeRemoveAclRequest;
+import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeSetAclRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-.OMRequest;
+import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 
@@ -117,12 +120,38 @@ public final class OzoneManagerRatisUtils {
   return new S3MultipartUploadAbortRequest(omRequest);
 case CompleteMultiPartUpload:
   return new S3MultipartUploadCompleteRequest(omRequest);
+case AddAcl:
+case RemoveAcl:
+case SetAcl:
+  return getOMAclRequest(omRequest);
 default:
   // TODO: will update once all request types are implemented.
   return null;
 }
   }
 
+  private static OMClientRequest getOMAclRequest(OMRequest omRequest) {
+Type cmdType = omRequest.getCmdType();
+if (Type.AddAcl == cmdType) {
+  ObjectType type = omRequest.getAddAclRequest().getObj().getResType();
+  if (ObjectType.VOLUME == type) {
+return new OMVolumeAddAclRequest(omRequest);
+  }
+} else if (Type.RemoveAcl == cmdType) {
+  ObjectType type = omRequest.getAddAclRequest().getObj().getResType();
+  if (ObjectType.VOLUME == type) {
+return new OMVolumeRemoveAclRequest(omRequest);
+  }
+} else if (Type.SetAcl == cmdType) {
+  ObjectType type = omRequest.getAddAclRequest().getObj().getResType();
+  if (ObjectType.VOLUME == type) {
+return new OMVolumeSetAclRequest(omRequest);
+  }
+}
+//TODO: handle bucket, key and prefix AddAcl
+return null;
+  }
+
   /**
* Convert exception result to {@link OzoneManagerProtocolProtos.Status}.
* @param exception
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
new file mode 100644
index 000..4d2a851
--- /dev/null
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
@@ -0,0 +1,157 @@
+package org.apache.hadoop.ozone.om.request.volume.acl;
+
+import 

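HDDS-1619 above routes AddAcl, RemoveAcl and SetAcl OMRequests whose object type is VOLUME to the new OMVolume*AclRequest classes. For orientation, a sketch of the client call that produces such a request; the client-side names here (OzoneClientFactory, ObjectStore#addAcl, OzoneObjInfo.Builder, OzoneAcl.parseAcl) are assumptions about the Ozone client API of this period and are not part of the commit:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;
    import org.apache.hadoop.ozone.security.acl.OzoneObj;
    import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

    public class VolumeAclExample {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Client entry points below are assumed, not quoted from the commit.
        try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
          ObjectStore store = client.getObjectStore();
          // A VOLUME-typed OzoneObj is what routes the resulting OMRequest to
          // OMVolumeAddAclRequest on the OM side.
          OzoneObj volume = OzoneObjInfo.Builder.newBuilder()
              .setResType(OzoneObj.ResourceType.VOLUME)
              .setStoreType(OzoneObj.StoreType.OZONE)
              .setVolumeName("vol1")
              .build();
          store.addAcl(volume, OzoneAcl.parseAcl("user:hadoop:rw"));
        }
      }
    }
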
[hadoop] branch ozone-0.4.1 updated: HDDS-1926. The new caching layer is used for old OM requests but not updated

2019-08-08 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new eb828dc  HDDS-1926. The new caching layer is used for old OM requests 
but not updated
eb828dc is described below

commit eb828dc1e40bf526b9b7d3e0a40b228b68bc76c8
Author: Bharat Viswanadham 
AuthorDate: Thu Aug 8 15:52:04 2019 +0200

HDDS-1926. The new caching layer is used for old OM requests but not updated

Closes #1247
---
 .../hadoop/ozone/om/TestOzoneManagerRestart.java   | 214 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java |  18 +-
 2 files changed, 230 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
new file mode 100644
index 000..76841dd
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.junit.Assert.fail;
+
+/**
+ * Test some client operations after cluster starts. And perform restart and
+ * then performs client operations and check the behavior is expected or not.
+ */
+public class TestOzoneManagerRestart {
+  private MiniOzoneCluster cluster = null;
+  private UserArgs userArgs;
+  private OzoneConfiguration conf;
+  private String clusterId;
+  private String scmId;
+  private String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(6);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * 
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  @Before
+  public void init() throws Exception {
+conf = new OzoneConfiguration();
+clusterId = UUID.randomUUID().toString();
+scmId = UUID.randomUUID().toString();
+omId = UUID.randomUUID().toString();
+conf.setBoolean(OZONE_ACL_ENABLED, true);
+conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
+conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
+cluster =  MiniOzoneCluster.newBuilder(conf)
+.setClusterId(clusterId)
+.setScmId(scmId)
+.setOmId(omId)
+.build();
+cluster.waitForClusterToBeReady();
+userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+null, null, null, null);
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @After
+  public void shutdown() {
+if (cluster != null) {
+  cluster.shutdown();
+}
+  }
+
+  @Test
+  public void testRestartOMWithVolumeOperation() throws Exception {
+String volumeName = "volume" + 

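HDDS-1926 is about old-style OM requests reading RocksDB directly while new-style requests write through a table cache first, so reads must consult the cache before the store. A toy read-through sketch of that pattern (simplified stand-in types, not the actual OmMetadataManagerImpl code):

    import java.util.Map;
    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    // Toy read-through table: the cache is consulted first so entries written by
    // the new cache-backed request path are visible to older read paths before
    // the double buffer flushes them to the backing store.
    public class CachedTableSketch {
      private final Map<String, String> store = new ConcurrentHashMap<>();            // stands in for RocksDB
      private final Map<String, Optional<String>> cache = new ConcurrentHashMap<>();  // table cache

      void putThroughCache(String key, String value) {
        cache.put(key, Optional.of(value));   // visible immediately
        // in Ozone the store write happens later, via the Ratis double buffer
      }

      String get(String key) {
        Optional<String> cached = cache.get(key);
        if (cached != null) {
          return cached.orElse(null);         // cached entry wins, including tombstones
        }
        return store.get(key);                // fall back to the backing store
      }

      public static void main(String[] args) {
        CachedTableSketch table = new CachedTableSketch();
        table.putThroughCache("/vol1", "volumeInfo");
        System.out.println(table.get("/vol1"));  // volumeInfo, even though the store is empty
      }
    }
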
[hadoop] branch trunk updated: HDDS-1926. The new caching layer is used for old OM requests but not updated

2019-08-08 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 63161cf  HDDS-1926. The new caching layer is used for old OM requests 
but not updated
63161cf is described below

commit 63161cf590d43fe7f6c905946b029d893b774d77
Author: Bharat Viswanadham 
AuthorDate: Thu Aug 8 15:52:04 2019 +0200

HDDS-1926. The new caching layer is used for old OM requests but not updated

Closes #1247
---
 .../hadoop/ozone/om/TestOzoneManagerRestart.java   | 214 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java |  18 +-
 2 files changed, 230 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
new file mode 100644
index 000..76841dd
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.junit.Assert.fail;
+
+/**
+ * Test some client operations after cluster starts. And perform restart and
+ * then performs client operations and check the behavior is expected or not.
+ */
+public class TestOzoneManagerRestart {
+  private MiniOzoneCluster cluster = null;
+  private UserArgs userArgs;
+  private OzoneConfiguration conf;
+  private String clusterId;
+  private String scmId;
+  private String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(6);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * 
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  @Before
+  public void init() throws Exception {
+conf = new OzoneConfiguration();
+clusterId = UUID.randomUUID().toString();
+scmId = UUID.randomUUID().toString();
+omId = UUID.randomUUID().toString();
+conf.setBoolean(OZONE_ACL_ENABLED, true);
+conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
+conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
+cluster =  MiniOzoneCluster.newBuilder(conf)
+.setClusterId(clusterId)
+.setScmId(scmId)
+.setOmId(omId)
+.build();
+cluster.waitForClusterToBeReady();
+userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+null, null, null, null);
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @After
+  public void shutdown() {
+if (cluster != null) {
+  cluster.shutdown();
+}
+  }
+
+  @Test
+  public void testRestartOMWithVolumeOperation() throws Exception {
+String volumeName = "volume" + 

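The TestOzoneManagerRestart class above is cut off before its test bodies. A sketch of the flow such a test follows, meant to sit inside that class; restartOzoneManager, getClient and the ObjectStore volume calls are assumptions about the MiniOzoneCluster and client API, not text quoted from the commit:

    @Test
    public void testRestartOMWithVolumeOperation() throws Exception {
      // Sketch only: the real test body is truncated in the diff above.
      String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
      OzoneClient client = cluster.getClient();           // assumed MiniOzoneCluster accessor
      ObjectStore objectStore = client.getObjectStore();

      objectStore.createVolume(volumeName);               // goes through the cache-backed request path
      cluster.restartOzoneManager();                      // assumed restart hook

      OzoneVolume volume = objectStore.getVolume(volumeName);
      Assert.assertEquals(volumeName, volume.getName());  // still visible after the restart
    }
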
[hadoop] branch trunk updated: YARN-9711. Missing spaces in NMClientImpl (#1177) Contributed by Charles Xu.

2019-08-08 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9e6519a  YARN-9711. Missing spaces in NMClientImpl (#1177) Contributed 
by Charles Xu.
9e6519a is described below

commit 9e6519a11a1689d6c213d281b594745f4dc82895
Author: Charles Xu 
AuthorDate: Thu Aug 8 21:41:04 2019 +0800

YARN-9711. Missing spaces in NMClientImpl (#1177) Contributed by Charles Xu.
---
 .../java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
index 96a93c2..fcc48e2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
@@ -130,11 +130,11 @@ public class NMClientImpl extends NMClient {
   } catch (YarnException e) {
 LOG.error("Failed to stop Container " +
 startedContainer.getContainerId() +
-"when stopping NMClientImpl");
+" when stopping NMClientImpl");
   } catch (IOException e) {
 LOG.error("Failed to stop Container " +
 startedContainer.getContainerId() +
-"when stopping NMClientImpl");
+" when stopping NMClientImpl");
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9685: NPE when rendering the info table of leaf queue in non-accessible partitions. Contributed by Tao Yang.

2019-08-08 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new b131214  YARN-9685: NPE when rendering the info table of leaf queue in 
non-accessible partitions. Contributed by Tao Yang.
b131214 is described below

commit b1312146850469017c77a9bf82120226621f233f
Author: Eric E Payne 
AuthorDate: Thu Aug 8 12:37:50 2019 +

YARN-9685: NPE when rendering the info table of leaf queue in 
non-accessible partitions. Contributed by Tao Yang.

(cherry picked from commit 3b38f2019e4f8d056580f3ed67ecef591011d7a6)
---
 .../webapp/CapacitySchedulerPage.java  | 24 ++
 .../webapp/dao/PartitionQueueCapacitiesInfo.java   |  3 ++-
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index ed2f64e..8f68e83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -157,10 +157,12 @@ class CapacitySchedulerPage extends RmView {
   : resourceUsages.getAmUsed();
   ri.
   __("Used Capacity:",
-  appendPercent(resourceUsages.getUsed().toString(),
+  appendPercent(resourceUsages.getUsed(),
   capacities.getUsedCapacity() / 100))
   .__("Configured Capacity:",
-  capacities.getConfiguredMinResource().toString())
+  capacities.getConfiguredMinResource() == null ?
+  Resources.none().toString() :
+  capacities.getConfiguredMinResource().toString())
   .__("Configured Max Capacity:",
   (capacities.getConfiguredMaxResource() == null
   || capacities.getConfiguredMaxResource().getResource()
@@ -168,10 +170,10 @@ class CapacitySchedulerPage extends RmView {
   ? "unlimited"
   : capacities.getConfiguredMaxResource().toString())
   .__("Effective Capacity:",
-  appendPercent(capacities.getEffectiveMinResource().toString(),
+  appendPercent(capacities.getEffectiveMinResource(),
   capacities.getCapacity() / 100))
   .__("Effective Max Capacity:",
-  appendPercent(capacities.getEffectiveMaxResource().toString(),
+  appendPercent(capacities.getEffectiveMaxResource(),
   capacities.getMaxCapacity() / 100))
   .__("Absolute Used Capacity:",
   percent(capacities.getAbsoluteUsedCapacity() / 100))
@@ -320,6 +322,8 @@ class CapacitySchedulerPage extends RmView {
 boolean isAutoCreatedLeafQueue = info.isLeafQueue() ?
 ((CapacitySchedulerLeafQueueInfo) info).isAutoCreatedLeafQueue()
 : false;
+float capPercent = absMaxCap == 0 ? 0 : absCap/absMaxCap;
+float usedCapPercent = absMaxCap == 0 ? 0 : absUsedCap/absMaxCap;
 
 String Q_WIDTH = width(absMaxCap * Q_MAX_WIDTH);
 LI> li = ul.
@@ -328,9 +332,9 @@ class CapacitySchedulerPage extends RmView {
 Q_WIDTH)
 :  Q_WIDTH).
   $title(join("Absolute Capacity:", percent(absCap))).
-  span().$style(join(Q_GIVEN, ";font-size:1px;", 
width(absCap/absMaxCap))).
+  span().$style(join(Q_GIVEN, ";font-size:1px;", 
width(capPercent))).
 __('.').__().
-  span().$style(join(width(absUsedCap/absMaxCap),
+  span().$style(join(width(usedCapPercent),
 ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : 
Q_UNDER)).
 __('.').__().
   span(".q", "Queue: "+info.getQueuePath().substring(5)).__().
@@ -658,8 +662,12 @@ class CapacitySchedulerPage extends RmView {
 return QueuesBlock.class;
   }
 
-  static String appendPercent(String message, float f) {
-return message + " (" + StringUtils.formatPercent(f, 1) + ")";
+  static String appendPercent(ResourceInfo resourceInfo, float f) {
+if (resourceInfo == null) {
+  return "";
+}
+return resourceInfo.toString() + " ("
++ StringUtils.formatPercent(f, 1) + ")";
   }
 
   static String percent(float f) {
diff --git 

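YARN-9685 boils down to two guards: a leaf queue in a partition the caller cannot access may report a null configured-min resource (the NPE from the JIRA title), and its absolute max capacity may be 0, where 0f/0f would make the queue bar width NaN. A standalone toy sketch of the same guards; ResourceInfo is replaced by Object here to keep the sketch self-contained:

    public class QueueBarWidthSketch {
      static String appendPercent(Object resourceInfo, float fraction) {
        if (resourceInfo == null) {
          return "";   // queue has no resource configured for this partition
        }
        return resourceInfo + " (" + String.format("%.1f%%", fraction * 100) + ")";
      }

      static float barWidth(float absCap, float absMaxCap) {
        return absMaxCap == 0 ? 0 : absCap / absMaxCap;   // avoid 0f/0f == NaN
      }

      public static void main(String[] args) {
        System.out.println(appendPercent(null, 0.5f));   // "" instead of an NPE on toString()
        System.out.println(barWidth(0f, 0f));            // 0.0 instead of NaN in the CSS width
      }
    }
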
[hadoop] branch branch-3.2 updated: YARN-9685: NPE when rendering the info table of leaf queue in non-accessible partitions. Contributed by Tao Yang.

2019-08-08 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new e47c483  YARN-9685: NPE when rendering the info table of leaf queue in 
non-accessible partitions. Contributed by Tao Yang.
e47c483 is described below

commit e47c483d9f0359a83eb230c93dda9fbcf25edcda
Author: Eric E Payne 
AuthorDate: Thu Aug 8 12:37:50 2019 +

YARN-9685: NPE when rendering the info table of leaf queue in 
non-accessible partitions. Contributed by Tao Yang.

(cherry picked from commit 3b38f2019e4f8d056580f3ed67ecef591011d7a6)
---
 .../webapp/CapacitySchedulerPage.java  | 24 ++
 .../webapp/dao/PartitionQueueCapacitiesInfo.java   |  3 ++-
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index ed2f64e..8f68e83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -157,10 +157,12 @@ class CapacitySchedulerPage extends RmView {
   : resourceUsages.getAmUsed();
   ri.
   __("Used Capacity:",
-  appendPercent(resourceUsages.getUsed().toString(),
+  appendPercent(resourceUsages.getUsed(),
   capacities.getUsedCapacity() / 100))
   .__("Configured Capacity:",
-  capacities.getConfiguredMinResource().toString())
+  capacities.getConfiguredMinResource() == null ?
+  Resources.none().toString() :
+  capacities.getConfiguredMinResource().toString())
   .__("Configured Max Capacity:",
   (capacities.getConfiguredMaxResource() == null
   || capacities.getConfiguredMaxResource().getResource()
@@ -168,10 +170,10 @@ class CapacitySchedulerPage extends RmView {
   ? "unlimited"
   : capacities.getConfiguredMaxResource().toString())
   .__("Effective Capacity:",
-  appendPercent(capacities.getEffectiveMinResource().toString(),
+  appendPercent(capacities.getEffectiveMinResource(),
   capacities.getCapacity() / 100))
   .__("Effective Max Capacity:",
-  appendPercent(capacities.getEffectiveMaxResource().toString(),
+  appendPercent(capacities.getEffectiveMaxResource(),
   capacities.getMaxCapacity() / 100))
   .__("Absolute Used Capacity:",
   percent(capacities.getAbsoluteUsedCapacity() / 100))
@@ -320,6 +322,8 @@ class CapacitySchedulerPage extends RmView {
 boolean isAutoCreatedLeafQueue = info.isLeafQueue() ?
 ((CapacitySchedulerLeafQueueInfo) info).isAutoCreatedLeafQueue()
 : false;
+float capPercent = absMaxCap == 0 ? 0 : absCap/absMaxCap;
+float usedCapPercent = absMaxCap == 0 ? 0 : absUsedCap/absMaxCap;
 
 String Q_WIDTH = width(absMaxCap * Q_MAX_WIDTH);
 LI> li = ul.
@@ -328,9 +332,9 @@ class CapacitySchedulerPage extends RmView {
 Q_WIDTH)
 :  Q_WIDTH).
   $title(join("Absolute Capacity:", percent(absCap))).
-  span().$style(join(Q_GIVEN, ";font-size:1px;", 
width(absCap/absMaxCap))).
+  span().$style(join(Q_GIVEN, ";font-size:1px;", 
width(capPercent))).
 __('.').__().
-  span().$style(join(width(absUsedCap/absMaxCap),
+  span().$style(join(width(usedCapPercent),
 ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : 
Q_UNDER)).
 __('.').__().
   span(".q", "Queue: "+info.getQueuePath().substring(5)).__().
@@ -658,8 +662,12 @@ class CapacitySchedulerPage extends RmView {
 return QueuesBlock.class;
   }
 
-  static String appendPercent(String message, float f) {
-return message + " (" + StringUtils.formatPercent(f, 1) + ")";
+  static String appendPercent(ResourceInfo resourceInfo, float f) {
+if (resourceInfo == null) {
+  return "";
+}
+return resourceInfo.toString() + " ("
++ StringUtils.formatPercent(f, 1) + ")";
   }
 
   static String percent(float f) {
diff --git 

[hadoop] branch trunk updated: YARN-9685: NPE when rendering the info table of leaf queue in non-accessible partitions. Contributed by Tao Yang.

2019-08-08 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3b38f20  YARN-9685: NPE when rendering the info table of leaf queue in 
non-accessible partitions. Contributed by Tao Yang.
3b38f20 is described below

commit 3b38f2019e4f8d056580f3ed67ecef591011d7a6
Author: Eric E Payne 
AuthorDate: Thu Aug 8 12:37:50 2019 +

YARN-9685: NPE when rendering the info table of leaf queue in 
non-accessible partitions. Contributed by Tao Yang.
---
 .../webapp/CapacitySchedulerPage.java  | 24 ++
 .../webapp/dao/PartitionQueueCapacitiesInfo.java   |  3 ++-
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index ed2f64e..8f68e83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -157,10 +157,12 @@ class CapacitySchedulerPage extends RmView {
   : resourceUsages.getAmUsed();
   ri.
   __("Used Capacity:",
-  appendPercent(resourceUsages.getUsed().toString(),
+  appendPercent(resourceUsages.getUsed(),
   capacities.getUsedCapacity() / 100))
   .__("Configured Capacity:",
-  capacities.getConfiguredMinResource().toString())
+  capacities.getConfiguredMinResource() == null ?
+  Resources.none().toString() :
+  capacities.getConfiguredMinResource().toString())
   .__("Configured Max Capacity:",
   (capacities.getConfiguredMaxResource() == null
   || capacities.getConfiguredMaxResource().getResource()
@@ -168,10 +170,10 @@ class CapacitySchedulerPage extends RmView {
   ? "unlimited"
   : capacities.getConfiguredMaxResource().toString())
   .__("Effective Capacity:",
-  appendPercent(capacities.getEffectiveMinResource().toString(),
+  appendPercent(capacities.getEffectiveMinResource(),
   capacities.getCapacity() / 100))
   .__("Effective Max Capacity:",
-  appendPercent(capacities.getEffectiveMaxResource().toString(),
+  appendPercent(capacities.getEffectiveMaxResource(),
   capacities.getMaxCapacity() / 100))
   .__("Absolute Used Capacity:",
   percent(capacities.getAbsoluteUsedCapacity() / 100))
@@ -320,6 +322,8 @@ class CapacitySchedulerPage extends RmView {
 boolean isAutoCreatedLeafQueue = info.isLeafQueue() ?
 ((CapacitySchedulerLeafQueueInfo) info).isAutoCreatedLeafQueue()
 : false;
+float capPercent = absMaxCap == 0 ? 0 : absCap/absMaxCap;
+float usedCapPercent = absMaxCap == 0 ? 0 : absUsedCap/absMaxCap;
 
 String Q_WIDTH = width(absMaxCap * Q_MAX_WIDTH);
 LI> li = ul.
@@ -328,9 +332,9 @@ class CapacitySchedulerPage extends RmView {
 Q_WIDTH)
 :  Q_WIDTH).
   $title(join("Absolute Capacity:", percent(absCap))).
-  span().$style(join(Q_GIVEN, ";font-size:1px;", 
width(absCap/absMaxCap))).
+  span().$style(join(Q_GIVEN, ";font-size:1px;", 
width(capPercent))).
 __('.').__().
-  span().$style(join(width(absUsedCap/absMaxCap),
+  span().$style(join(width(usedCapPercent),
 ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : 
Q_UNDER)).
 __('.').__().
   span(".q", "Queue: "+info.getQueuePath().substring(5)).__().
@@ -658,8 +662,12 @@ class CapacitySchedulerPage extends RmView {
 return QueuesBlock.class;
   }
 
-  static String appendPercent(String message, float f) {
-return message + " (" + StringUtils.formatPercent(f, 1) + ")";
+  static String appendPercent(ResourceInfo resourceInfo, float f) {
+if (resourceInfo == null) {
+  return "";
+}
+return resourceInfo.toString() + " ("
++ StringUtils.formatPercent(f, 1) + ")";
   }
 
   static String percent(float f) {
diff --git 

[hadoop] branch ozone-0.4.1 updated: HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.

2019-08-08 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 033c175  HDDS-1888. Add containers to node2container map in SCM as 
part of ICR processing.
033c175 is described below

commit 033c175cd682545340211cb6e0b08b5a45572e82
Author: Nanda kumar 
AuthorDate: Thu Aug 8 15:22:03 2019 +0530

HDDS-1888. Add containers to node2container map in SCM as part of ICR 
processing.

Signed-off-by: Nanda kumar 
(cherry picked from commit 397a5633af767eee99083c0ac4a8d4282f651911)
---
 .../IncrementalContainerReportHandler.java | 16 -
 .../apache/hadoop/hdds/scm/node/NodeManager.java   | 11 
 .../hadoop/hdds/scm/node/NodeStateManager.java | 15 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  7 +++
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  | 72 +-
 .../hdds/scm/server/StorageContainerManager.java   |  3 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java | 13 
 .../TestIncrementalContainerReportHandler.java |  9 ++-
 .../testutils/ReplicationNodeManagerMock.java  |  7 +++
 9 files changed, 132 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index a7efb55..3dd3d9d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
 .ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -39,9 +42,13 @@ public class IncrementalContainerReportHandler extends
   private static final Logger LOG = LoggerFactory.getLogger(
   IncrementalContainerReportHandler.class);
 
+  private final NodeManager nodeManager;
+
   public IncrementalContainerReportHandler(
+  final NodeManager nodeManager,
   final ContainerManager containerManager)  {
 super(containerManager, LOG);
+this.nodeManager = nodeManager;
   }
 
   @Override
@@ -53,9 +60,16 @@ public class IncrementalContainerReportHandler extends
 for (ContainerReplicaProto replicaProto :
 report.getReport().getReportList()) {
   try {
-processContainerReplica(report.getDatanodeDetails(), replicaProto);
+final DatanodeDetails dd = report.getDatanodeDetails();
+final ContainerID id = ContainerID.valueof(
+replicaProto.getContainerID());
+nodeManager.addContainer(dd, id);
+processContainerReplica(dd, replicaProto);
   } catch (ContainerNotFoundException e) {
 LOG.warn("Container {} not found!", replicaProto.getContainerID());
+  } catch (NodeNotFoundException ex) {
+LOG.error("Received ICR from unknown datanode {} {}",
+report.getDatanodeDetails(), ex);
   } catch (IOException e) {
 LOG.error("Exception while processing ICR for container {}",
 replicaProto.getContainerID());
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 0ccbb82..d8890fb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -130,6 +130,17 @@ public interface NodeManager extends 
StorageContainerNodeProtocol,
   void removePipeline(Pipeline pipeline);
 
   /**
+   * Adds the given container to the specified datanode.
+   *
+   * @param datanodeDetails - DatanodeDetails
+   * @param containerId - containerID
+   * @throws NodeNotFoundException - if datanode is not known. For new datanode
+   *use addDatanodeInContainerMap call.
+   */
+  void addContainer(DatanodeDetails datanodeDetails,
+ContainerID containerId) throws NodeNotFoundException;
+
+  /**
* Remaps datanode to containers mapping to the new set of containers.
* @param datanodeDetails - DatanodeDetails
* @param containerIds - Set of containerIDs
diff --git 

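HDDS-1888 makes incremental container report (ICR) processing call nodeManager.addContainer for every reported replica, so SCM's node-to-containers map stays current between full reports. A conceptual sketch of the bookkeeping that call maintains; the types are simplified stand-ins, the real state lives in NodeStateMap:

    import java.util.Map;
    import java.util.Set;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;

    public class Node2ContainerSketch {
      // node -> containers map, the structure addContainer keeps current
      private final Map<UUID, Set<Long>> node2Containers = new ConcurrentHashMap<>();

      void register(UUID datanodeUuid) {
        node2Containers.putIfAbsent(datanodeUuid, ConcurrentHashMap.newKeySet());
      }

      void addContainer(UUID datanodeUuid, long containerId) {
        Set<Long> containers = node2Containers.get(datanodeUuid);
        if (containers == null) {
          // the real handler reports this as NodeNotFoundException and skips the report
          throw new IllegalStateException("Unknown datanode " + datanodeUuid);
        }
        containers.add(containerId);  // re-adding a known container is a no-op
      }

      public static void main(String[] args) {
        Node2ContainerSketch sketch = new Node2ContainerSketch();
        UUID dn = UUID.randomUUID();
        sketch.register(dn);            // node registration (heartbeat path)
        sketch.addContainer(dn, 101L);
        sketch.addContainer(dn, 101L);
        System.out.println(sketch.node2Containers.get(dn));  // [101]
      }
    }
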
[hadoop] branch trunk updated: HDDS-1888. Add containers to node2container map in SCM as part of ICR processing.

2019-08-08 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 397a563  HDDS-1888. Add containers to node2container map in SCM as 
part of ICR processing.
397a563 is described below

commit 397a5633af767eee99083c0ac4a8d4282f651911
Author: Nanda kumar 
AuthorDate: Thu Aug 8 15:22:03 2019 +0530

HDDS-1888. Add containers to node2container map in SCM as part of ICR 
processing.

Signed-off-by: Nanda kumar 
---
 .../IncrementalContainerReportHandler.java | 16 -
 .../apache/hadoop/hdds/scm/node/NodeManager.java   | 11 
 .../hadoop/hdds/scm/node/NodeStateManager.java | 15 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java   |  7 +++
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  | 72 +-
 .../hdds/scm/server/StorageContainerManager.java   |  3 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java | 13 
 .../TestIncrementalContainerReportHandler.java |  9 ++-
 .../testutils/ReplicationNodeManagerMock.java  |  7 +++
 9 files changed, 132 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index a7efb55..3dd3d9d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
 .ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -39,9 +42,13 @@ public class IncrementalContainerReportHandler extends
   private static final Logger LOG = LoggerFactory.getLogger(
   IncrementalContainerReportHandler.class);
 
+  private final NodeManager nodeManager;
+
   public IncrementalContainerReportHandler(
+  final NodeManager nodeManager,
   final ContainerManager containerManager)  {
 super(containerManager, LOG);
+this.nodeManager = nodeManager;
   }
 
   @Override
@@ -53,9 +60,16 @@ public class IncrementalContainerReportHandler extends
 for (ContainerReplicaProto replicaProto :
 report.getReport().getReportList()) {
   try {
-processContainerReplica(report.getDatanodeDetails(), replicaProto);
+final DatanodeDetails dd = report.getDatanodeDetails();
+final ContainerID id = ContainerID.valueof(
+replicaProto.getContainerID());
+nodeManager.addContainer(dd, id);
+processContainerReplica(dd, replicaProto);
   } catch (ContainerNotFoundException e) {
 LOG.warn("Container {} not found!", replicaProto.getContainerID());
+  } catch (NodeNotFoundException ex) {
+LOG.error("Received ICR from unknown datanode {} {}",
+report.getDatanodeDetails(), ex);
   } catch (IOException e) {
 LOG.error("Exception while processing ICR for container {}",
 replicaProto.getContainerID());
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 0ccbb82..d8890fb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -130,6 +130,17 @@ public interface NodeManager extends 
StorageContainerNodeProtocol,
   void removePipeline(Pipeline pipeline);
 
   /**
+   * Adds the given container to the specified datanode.
+   *
+   * @param datanodeDetails - DatanodeDetails
+   * @param containerId - containerID
+   * @throws NodeNotFoundException - if datanode is not known. For new datanode
+   *use addDatanodeInContainerMap call.
+   */
+  void addContainer(DatanodeDetails datanodeDetails,
+ContainerID containerId) throws NodeNotFoundException;
+
+  /**
* Remaps datanode to containers mapping to the new set of containers.
* @param datanodeDetails - DatanodeDetails
* @param containerIds - Set of containerIDs
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
 

[hadoop] branch ozone-0.4.1 updated: HDDS-1925. ozonesecure acceptance test broken by HTTP auth requirement (#1248)

2019-08-08 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new ed389df  HDDS-1925. ozonesecure acceptance test broken by HTTP auth 
requirement (#1248)
ed389df is described below

commit ed389df61562a4437530c6a77d206473420a205b
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Thu Aug 8 02:07:15 2019 +0200

HDDS-1925. ozonesecure acceptance test broken by HTTP auth requirement 
(#1248)

(cherry picked from commit ab6a5c9d07a50b49d696b983e1a1cd4f9ef2a44d)
---
 hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh   |  2 +-
 hadoop-ozone/dist/src/main/compose/testlib.sh| 16 +++-
 hadoop-ozone/dist/src/main/smoketest/basic/basic.robot   |  5 ++---
 .../dist/src/main/smoketest/basic/ozone-shell.robot  |  1 +
 hadoop-ozone/dist/src/main/smoketest/commonlib.robot |  6 +-
 .../dist/src/main/smoketest/s3/commonawslib.robot|  1 +
 hadoop-ozone/dist/src/main/smoketest/s3/webui.robot  |  7 ---
 7 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index f13f010..01106b8 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -33,7 +33,7 @@ execute_robot_test scm security
 
 execute_robot_test scm ozonefs/ozonefs.robot
 
-execute_robot_test scm s3
+execute_robot_test s3g s3
 
 stop_docker_env
 
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh 
b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 065c53f..462b9fa 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -28,6 +28,20 @@ mkdir -p "$RESULT_DIR"
 #Should be writeable from the docker containers where user is different.
 chmod ogu+w "$RESULT_DIR"
 
+## @description print the number of datanodes up
+## @param the docker-compose file
+count_datanodes() {
+  local compose_file=$1
+
+  local 
jmx_url='http://scm:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo'
+  if [[ "${SECURITY_ENABLED}" == 'true' ]]; then
+docker-compose -f "${compose_file}" exec -T scm bash -c "kinit -k 
HTTP/s...@example.com -t /etc/security/keytabs/HTTP.keytab && curl --negotiate 
-u : -s '${jmx_url}'"
+  else
+docker-compose -f "${compose_file}" exec -T scm curl -s "${jmx_url}"
+  fi \
+| jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+}
+
 ## @description wait until datanodes are up (or 30 seconds)
 ## @param the docker-compose file
 ## @param number of datanodes to wait for (default: 3)
@@ -43,7 +57,7 @@ wait_for_datanodes(){
 
  #This line checks the number of HEALTHY datanodes registered in scm over 
the
  # jmx HTTP servlet
- datanodes=$(docker-compose -f "${compose_file}" exec -T scm curl -s 
'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo'
 | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
+ datanodes=$(count_datanodes "${compose_file}")
  if [[ "$datanodes" ]]; then
if [[ ${datanodes} -ge ${datanode_count} ]]; then
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot 
b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
index 88af097..c750521 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
@@ -25,9 +25,8 @@ ${DATANODE_HOST}datanode
 *** Test Cases ***
 
 Check webui static resources
-${result} =Executecurl -s -I 
http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
-   Should contain ${result}200
-${result} =Executecurl -s -I 
http://om:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
+Run Keyword if'${SECURITY_ENABLED}' == 'true'Kinit HTTP user
+${result} =Executecurl --negotiate -u : -s -I 
http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
Should contain ${result}200
 
 Start freon testing
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot 
b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
index 690fa26..60a3f04 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
@@ -17,6 +17,7 @@
 Documentation   Test ozone shell CLI usage
 Library OperatingSystem
 Resource../commonlib.robot
+Test Setup  Run Keyword if'${SECURITY_ENABLED}' == 'true'Kinit 
test user testuser testuser.keytab
 Test Timeout2 minute
 
 *** Variables ***
diff --git