hadoop git commit: HDDS-834. Datanode goes OOM based because of segment size. Contributed by Mukul Kumar Singh.

2018-11-14 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fade865c -> a94828170


HDDS-834. Datanode goes OOM based because of segment size. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9482817
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9482817
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9482817

Branch: refs/heads/trunk
Commit: a94828170684793b80efdd76dc8a3167e324c0ea
Parents: 3fade86
Author: Shashikant Banerjee 
Authored: Wed Nov 14 15:53:22 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 15:53:22 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  2 +-
 .../common/src/main/resources/ozone-default.xml |  4 +--
 .../server/ratis/ContainerStateMachine.java | 27 +++-
 3 files changed, 24 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index cedcc43..b748d69 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -74,7 +74,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
   public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  1 * 1024 * 1024 * 1024;
+  16 * 1024;
  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
  "dfs.container.ratis.segment.preallocated.size";
   public static final int

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 54bffd5..e94e7e1 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -175,10 +175,10 @@
   </property>
   <property>
     <name>dfs.container.ratis.segment.size</name>
-    <value>1073741824</value>
+    <value>16384</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The size of the raft segment used by Apache Ratis on datanodes.
-      (1 GB by default)
+      (16 KB by default)
     </description>
   </property>
   <property>

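[Editor's note] The change above simply shrinks the default Ratis raft log segment size from 1 GiB (1073741824 bytes) to 16 KiB (16384 bytes); per the commit title, the gigabyte-scale default was driving datanode memory use. A hedged, illustrative sketch of reading the key with its new default through the standard OzoneConfiguration lookup (this is not the datanode's actual Ratis wiring, which is outside this excerpt):

// Illustrative only: resolves dfs.container.ratis.segment.size with the new
// 16 KB fallback. An ozone-site.xml override, if present, wins.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class SegmentSizeCheck {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    int segmentSize = conf.getInt(
        "dfs.container.ratis.segment.size", 16 * 1024);
    System.out.println("Ratis raft segment size in use: " + segmentSize + " bytes");
  }
}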
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 3899bde..a3b496a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -120,7 +120,8 @@ public class ContainerStateMachine extends BaseStateMachine {
   createContainerFutureMap;
   private ExecutorService[] executors;
   private final int numExecutors;
-  private final Map containerCommandCompletionMap;
+  private final Map applyTransactionCompletionMap;
+  private long lastIndex;
   /**
* CSM metrics.
*/
@@ -138,7 +139,8 @@ public class ContainerStateMachine extends BaseStateMachine {
 this.executors = executors.toArray(new ExecutorService[numExecutors]);
 this.writeChunkFutureMap = new ConcurrentHashMap<>();
 this.createContainerFutureMap = new ConcurrentHashMap<>();
-containerCommandCompletionMap = new ConcurrentHashMap<>();
+applyTransactionCompletionMap = new ConcurrentHashMap<>();
+this.lastIndex = RaftServerConstants.INVALID_LOG_INDEX;
   }
 
   @Override
@@ -162,10 +164,12 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   private long loadSnapshot(SingleFileSnapshotInfo snapshot) {
 if (snapshot == null) {
-  TermIndex empty = TermIndex.newTermIndex(0, 0);
+  TermIndex empty = TermIndex.newTermIndex(0,
+  RaftServerConstants.INVALID_LOG_INDEX);
   LOG.info("The snapshot info is null." +
   "Setting the last applied index to:" + empty);
   

hadoop git commit: HDDS-834. Datanode goes OOM based because of segment size. Contributed by Mukul Kumar Singh.

2018-11-14 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 a2fa8324d -> 3923a4a27


HDDS-834. Datanode goes OOM based because of segment size. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3923a4a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3923a4a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3923a4a2

Branch: refs/heads/ozone-0.3
Commit: 3923a4a279565108ac4ad341c031173aa967c603
Parents: a2fa832
Author: Shashikant Banerjee 
Authored: Wed Nov 14 15:50:46 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 15:50:46 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  2 +-
 .../common/src/main/resources/ozone-default.xml |  4 +--
 .../server/ratis/ContainerStateMachine.java | 30 ++--
 3 files changed, 25 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3923a4a2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index f2cebe9..38c41ba 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -69,7 +69,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
   public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  1 * 1024 * 1024 * 1024;
+  16 * 1024;
  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
  "dfs.container.ratis.segment.preallocated.size";
   public static final int

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3923a4a2/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 512b3ee..4a72d39 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -175,10 +175,10 @@
   </property>
   <property>
     <name>dfs.container.ratis.segment.size</name>
-    <value>1073741824</value>
+    <value>16384</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The size of the raft segment used by Apache Ratis on datanodes.
-      (1 GB by default)
+      (16 KB by default)
     </description>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3923a4a2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 270e164..d2d2209 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -119,7 +119,8 @@ public class ContainerStateMachine extends BaseStateMachine {
   private final ConcurrentHashMap createContainerResponseMap;
   private ExecutorService[] executors;
   private final int numExecutors;
-  private final Map containerCommandCompletionMap;
+  private final Map applyTransactionCompletionMap;
+  private long lastIndex;
   /**
* CSM metrics.
*/
@@ -137,7 +138,8 @@ public class ContainerStateMachine extends BaseStateMachine {
 this.executors = executors.toArray(new ExecutorService[numExecutors]);
 this.writeChunkFutureMap = new ConcurrentHashMap<>();
 this.createContainerResponseMap = new ConcurrentHashMap<>();
-containerCommandCompletionMap = new ConcurrentHashMap<>();
+applyTransactionCompletionMap = new ConcurrentHashMap<>();
+this.lastIndex = RaftServerConstants.INVALID_LOG_INDEX;
   }
 
   @Override
@@ -161,10 +163,12 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   private long loadSnapshot(SingleFileSnapshotInfo snapshot) {
 if (snapshot == null) {
-  TermIndex empty = TermIndex.newTermIndex(0, 0);
+  TermIndex empty = TermIndex.newTermIndex(0,
+  RaftServerConstants.INVALID_LOG_INDEX);
   LOG.info("The snapshot info is null." +
   "Setting the last 

hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 ef085e088 -> 29f155907


HADOOP-15930. Exclude MD5 checksum files from release artifact.

(cherry picked from commit df5e863fee544c9283e28a21c2788c008d7e3e04)
(cherry picked from commit 4199086084245a32d5a8ec09e1571948f0765c83)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29f15590
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29f15590
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29f15590

Branch: refs/heads/branch-2.9
Commit: 29f1559071d2f8205efdcef28b8fc9f52539c86d
Parents: ef085e0
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:50:00 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29f15590/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index fbe3fb2..33c87cd 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -167,11 +167,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 ## @description  set JAVA_HOME properly
 ## @audience public
 ## @stabilityunstable
@@ -241,11 +236,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -601,9 +591,6 @@ function signartifacts
   declare i
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -615,7 +602,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3aac324a0 -> 419908608


HADOOP-15930. Exclude MD5 checksum files from release artifact.

(cherry picked from commit df5e863fee544c9283e28a21c2788c008d7e3e04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41990860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41990860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41990860

Branch: refs/heads/branch-2
Commit: 4199086084245a32d5a8ec09e1571948f0765c83
Parents: 3aac324
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:49:37 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41990860/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index fbe3fb2..33c87cd 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -167,11 +167,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 ## @description  set JAVA_HOME properly
 ## @audience public
 ## @stabilityunstable
@@ -241,11 +236,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -601,9 +591,6 @@ function signartifacts
   declare i
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -615,7 +602,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 e3593c09d -> 77c4bc906


HADOOP-15930. Exclude MD5 checksum files from release artifact.

(cherry picked from commit df5e863fee544c9283e28a21c2788c008d7e3e04)
(cherry picked from commit 4199086084245a32d5a8ec09e1571948f0765c83)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77c4bc90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77c4bc90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77c4bc90

Branch: refs/heads/branch-2.8
Commit: 77c4bc9068abc0eef5bbc3de097aceb57e31bb7e
Parents: e3593c0
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:50:22 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77c4bc90/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 09066a1..81b40b2 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -167,11 +167,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 ## @description  set JAVA_HOME properly
 ## @audience public
 ## @stabilityunstable
@@ -241,11 +236,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -601,9 +591,6 @@ function signartifacts
   declare i
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -615,7 +602,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-836. Add TokenIdentifier Ozone for delegation token and block token. Contributed by Ajay Kumar.

2018-11-14 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-4 629347bea -> dea0b7be3


HDDS-836. Add TokenIdentifier Ozone for delegation token and block token. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dea0b7be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dea0b7be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dea0b7be

Branch: refs/heads/HDDS-4
Commit: dea0b7be3aa85043451305493c96c534026579d1
Parents: 629347b
Author: Xiaoyu Yao 
Authored: Wed Nov 14 14:26:33 2018 -0800
Committer: Xiaoyu Yao 
Committed: Wed Nov 14 14:26:33 2018 -0800

--
 .../hdds/security/x509/keys/SecurityUtil.java   |  59 
 hadoop-hdds/common/src/main/proto/hdds.proto|  24 ++
 .../security/OzoneBlockTokenIdentifier.java | 178 +++
 .../ozone/security/OzoneBlockTokenSelector.java |  55 
 .../security/OzoneDelegationTokenSelector.java  |  52 
 .../hadoop/ozone/security/OzoneSecretKey.java   | 195 
 .../ozone/security/OzoneTokenIdentifier.java| 217 ++
 .../hadoop/ozone/security/package-info.java |  21 ++
 .../src/main/proto/OzoneManagerProtocol.proto   |  20 ++
 .../security/TestOzoneBlockTokenIdentifier.java | 255 
 .../security/TestOzoneTokenIdentifier.java  | 300 +++
 .../hadoop/ozone/security/package-info.java |  21 ++
 12 files changed, 1397 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dea0b7be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
index 2ca8825..6147d3a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
@@ -18,6 +18,15 @@
  */
 package org.apache.hadoop.hdds.security.x509.keys;
 
+import java.security.KeyFactory;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.security.spec.X509EncodedKeySpec;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
 import org.bouncycastle.asn1.ASN1ObjectIdentifier;
 import org.bouncycastle.asn1.ASN1Sequence;
@@ -76,4 +85,54 @@ public final class SecurityUtil {
 }
 throw new CertificateException("No PKCS#9 extension found in CSR");
   }
+
+  /*
+   * Returns private key created from encoded key.
+   * @return private key if successful else returns null.
+   */
+  public static PrivateKey getPrivateKey(byte[] encodedKey,
+  SecurityConfig secureConfig) {
+PrivateKey pvtKey = null;
+if (encodedKey == null || encodedKey.length == 0) {
+  return null;
+}
+
+try {
+  KeyFactory kf = null;
+
+  kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(),
+  secureConfig.getProvider());
+  pvtKey = kf.generatePrivate(new PKCS8EncodedKeySpec(encodedKey));
+
+} catch (NoSuchAlgorithmException | InvalidKeySpecException |
+NoSuchProviderException e) {
+  return null;
+}
+return pvtKey;
+  }
+
+  /*
+   * Returns public key created from encoded key.
+   * @return public key if successful else returns null.
+   */
+  public static PublicKey getPublicKey(byte[] encodedKey,
+  SecurityConfig secureConfig) {
+PublicKey key = null;
+if (encodedKey == null || encodedKey.length == 0) {
+  return null;
+}
+
+try {
+  KeyFactory kf = null;
+  kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(),
+  secureConfig.getProvider());
+  key = kf.generatePublic(new X509EncodedKeySpec(encodedKey));
+
+} catch (NoSuchAlgorithmException | InvalidKeySpecException |
+NoSuchProviderException e) {
+  return null;
+}
+return key;
+  }
+
 }
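
[Editor's note] The two helpers added above are thin wrappers around JCA KeyFactory decoding of PKCS#8 (private) and X.509 (public) encodings, returning null on any failure. A self-contained round-trip sketch of the same idea; "RSA" is an assumption here because SecurityConfig's algorithm and provider are not shown in this excerpt:

// Hedged example: rebuild both keys from their encoded bytes, mirroring what
// SecurityUtil.getPrivateKey/getPublicKey do once SecurityConfig supplies the
// key algorithm and provider.
import java.security.KeyFactory;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.X509EncodedKeySpec;

public class KeyRoundTrip {
  public static void main(String[] args) throws Exception {
    KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
    KeyFactory kf = KeyFactory.getInstance("RSA");
    PrivateKey pvt = kf.generatePrivate(
        new PKCS8EncodedKeySpec(pair.getPrivate().getEncoded()));
    PublicKey pub = kf.generatePublic(
        new X509EncodedKeySpec(pair.getPublic().getEncoded()));
    System.out.println(pvt.getAlgorithm() + " / " + pub.getAlgorithm());
  }
}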

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dea0b7be/hadoop-hdds/common/src/main/proto/hdds.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto 
b/hadoop-hdds/common/src/main/proto/hdds.proto
index a0c6f16..c1cfe53 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -191,6 +191,30 @@ message ContainerBlockID {
 required int64 localID = 2;
 }
 
+
+/**
+ 

hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk bac8807c8 -> df5e863fe


HADOOP-15930. Exclude MD5 checksum files from release artifact.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df5e863f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df5e863f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df5e863f

Branch: refs/heads/trunk
Commit: df5e863fee544c9283e28a21c2788c008d7e3e04
Parents: bac8807
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:45:34 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5e863f/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index c861654..9d0decf 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -169,11 +169,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 function header()
 {
   echo
@@ -235,11 +230,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -641,9 +631,6 @@ function signartifacts
   declare ret
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -655,7 +642,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5d532cfc6 -> ba75aeec2


HADOOP-15930. Exclude MD5 checksum files from release artifact.

(cherry picked from commit df5e863fee544c9283e28a21c2788c008d7e3e04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba75aeec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba75aeec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba75aeec

Branch: refs/heads/branch-3.1
Commit: ba75aeec284b345381a00c751a2faed9c04d21fe
Parents: 5d532cf
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:47:32 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba75aeec/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b0f01d7..8c01cba 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -169,11 +169,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 function header()
 {
   echo
@@ -235,11 +230,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -641,9 +631,6 @@ function signartifacts
   declare ret
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -655,7 +642,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 37082a664 -> 9f3d42f10


HADOOP-15930. Exclude MD5 checksum files from release artifact.

(cherry picked from commit df5e863fee544c9283e28a21c2788c008d7e3e04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f3d42f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f3d42f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f3d42f1

Branch: refs/heads/branch-3.2
Commit: 9f3d42f1068dcfba99f89514950d05539b65842c
Parents: 37082a6
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:47:13 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f3d42f1/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index c861654..9d0decf 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -169,11 +169,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 function header()
 {
   echo
@@ -235,11 +230,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -641,9 +631,6 @@ function signartifacts
   declare ret
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -655,7 +642,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15930. Exclude MD5 checksum files from release artifact.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 64cb97fb4 -> 2db612fff


HADOOP-15930. Exclude MD5 checksum files from release artifact.

(cherry picked from commit df5e863fee544c9283e28a21c2788c008d7e3e04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2db612ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2db612ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2db612ff

Branch: refs/heads/branch-3.0
Commit: 2db612fff96b7348f6549c83182dce6b3fa0b04a
Parents: 64cb97f
Author: Akira Ajisaka 
Authored: Wed Nov 14 14:50:34 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 10:47:53 2018 +0900

--
 dev-support/bin/create-release | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2db612ff/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b0f01d7..8c01cba 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -169,11 +169,6 @@ function run()
   fi
 }
 
-function domd5()
-{
-  run "${MD5SUM}" "${1}" > "${1}.md5"
-}
-
 function header()
 {
   echo
@@ -235,11 +230,6 @@ function set_defaults
 fi
   fi
 
-  MD5SUM=$(command -v md5sum)
-  if [[ -z "${MD5SUM}" ]]; then
-MD5SUM=$(command -v md5)
-  fi
-
   NATIVE=false
   OSNAME=$(uname -s)
 
@@ -641,9 +631,6 @@ function signartifacts
   declare ret
 
   if [[ "${SIGN}" = false ]]; then
-for i in ${ARTIFACTS_DIR}/*; do
-  domd5 "${i}"
-done
 echo ""
 echo "Remember to sign the artifacts before staging them on the open"
 echo ""
@@ -655,7 +642,6 @@ function signartifacts
   for i in ${ARTIFACTS_DIR}/*; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 ${GPG} --print-mds "${i}" > "${i}.mds"
-domd5 "${i}"
   done
 
   if [[ "${ASFRELEASE}" = true ]]; then


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-819. Match OzoneFileSystem behavior with S3AFileSystem. Contributed by Hanisha Koneru.

2018-11-14 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 21ec4bdae -> bac8807c8


HDDS-819. Match OzoneFileSystem behavior with S3AFileSystem. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bac8807c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bac8807c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bac8807c

Branch: refs/heads/trunk
Commit: bac8807c8b7abb4864aed921585f6e6fc5e9cd5c
Parents: 21ec4bd
Author: Arpit Agarwal 
Authored: Wed Nov 14 16:12:06 2018 -0800
Committer: Arpit Agarwal 
Committed: Wed Nov 14 16:12:06 2018 -0800

--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java | 277 ---
 .../hadoop/fs/ozone/TestOzoneFileSystem.java| 174 +++-
 2 files changed, 411 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bac8807c/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 1336382..78b6e5d 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -24,13 +24,17 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Iterator;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.google.common.base.Preconditions;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -199,17 +203,7 @@ public class OzoneFileSystem extends FileSystem {
 deleteObject(key);
   }
 } catch (FileNotFoundException ignored) {
-  // check if the parent directory needs to be created
-  Path parent = f.getParent();
-  try {
-// create all the directories for the parent
-FileStatus parentStatus = getFileStatus(parent);
-LOG.trace("parent key:{} status:{}", key, parentStatus);
-  } catch (FileNotFoundException e) {
-mkdirs(parent);
-  }
-  // This exception needs to ignored as this means that the file currently
-  // does not exists and a new file can thus be created.
+  // this means the file is not found
 }
 
 OzoneOutputStream ozoneOutputStream =
@@ -390,8 +384,14 @@ public class OzoneFileSystem extends FileSystem {
 }
   }
 
-  @Override
-  public boolean delete(Path f, boolean recursive) throws IOException {
+  /**
+   * Deletes the children of the input dir path by iterating though the
+   * DeleteIterator.
+   * @param f directory path to be deleted
+   * @return true if successfully deletes all required keys, false otherwise
+   * @throws IOException
+   */
+  private boolean innerDelete(Path f, boolean recursive) throws IOException {
 LOG.trace("delete() path:{} recursive:{}", f, recursive);
 try {
   DeleteIterator iterator = new DeleteIterator(f, recursive);
@@ -402,35 +402,185 @@ public class OzoneFileSystem extends FileSystem {
 }
   }
 
+  @Override
+  public boolean delete(Path f, boolean recursive) throws IOException {
+LOG.debug("Delete path {} - recursive {}", f, recursive);
+FileStatus status;
+try {
+  status = getFileStatus(f);
+} catch (FileNotFoundException ex) {
+  LOG.warn("delete: Path does not exist: {}", f);
+  return false;
+}
+
+String key = pathToKey(f);
+boolean result;
+
+if (status.isDirectory()) {
+  LOG.debug("delete: Path is a directory: {}", f);
+  key = addTrailingSlashIfNeeded(key);
+
+  if (key.equals("/")) {
+LOG.warn("Cannot delete root directory.");
+return false;
+  }
+
+  result = innerDelete(f, recursive);
+} else {
+  LOG.debug("delete: Path is a file: {}", f);
+  result = deleteObject(key);
+}
+
+if (result) {
+  // If this delete operation removes all files/directories from the
+  // parent direcotry, then an empty parent directory must be created.
+  Path parent = f.getParent();
+  if (parent != null && !parent.isRoot()) {
+createFakeDirectoryIfNecessary(parent);
+  }
+}
+
+return result;
+  }
+
+  /**
+   * Create a fake parent directory key if it does not already exist and no
+   * other child of this parent directory exists.
+   * @param f path to the fake parent directory

[06/18] hadoop git commit: Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

2018-11-14 Thread brahma
Revert "HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel."

This reverts commit 7dc79a8b5b7af0bf37d25a221be8ed446b0edb74.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9da6054c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9da6054c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9da6054c

Branch: refs/heads/HDFS-13891
Commit: 9da6054ca4ff6f8bb19506d80685b17d2c79
Parents: 762a56c
Author: Xiao Chen 
Authored: Tue Nov 13 12:43:58 2018 -0800
Committer: Xiao Chen 
Committed: Tue Nov 13 12:44:25 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++--
 .../src/test/resources/testErasureCodingConf.xml|  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da6054c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 903a1e2..5f8626e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -358,16 +358,16 @@ public class ECAdmin extends Configured implements Tool {
   final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
   try {
 dfs.setErasureCodingPolicy(p, ecPolicyName);
-
-String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
-
-System.out.println("Set " + actualECPolicyName +
-" erasure coding policy on "+ path);
+if (ecPolicyName == null){
+  ecPolicyName = "default";
+}
+System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
+" " + path);
 RemoteIterator dirIt = dfs.listStatusIterator(p);
 if (dirIt.hasNext()) {
   System.out.println("Warning: setting erasure coding policy on a " +
   "non-empty directory will not automatically convert existing " +
-  "files to " + actualECPolicyName + " erasure coding policy");
+  "files to " + ecPolicyName + " erasure coding policy");
 }
   } catch (Exception e) {
 System.err.println(AdminHelper.prettifyException(e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da6054c/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 34f5176..6411fe6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -734,7 +734,7 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on /ecdir
+  Set default erasure coding policy on /ecdir
 
   
 
@@ -752,11 +752,11 @@
   
 
   SubstringComparator
-  Set RS-6-3-1024k erasure coding policy on /ecdir
+  Set default erasure coding policy on /ecdir
 
 
   SubstringComparator
-  Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024k erasure coding policy
+  Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to default erasure coding policy
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/18] hadoop git commit: HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.

2018-11-14 Thread brahma
HADOOP-15917. AliyunOSS: fix incorrect ReadOps and WriteOps in statistics. Contributed by Jinhu Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fade865
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fade865
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fade865

Branch: refs/heads/HDFS-13891
Commit: 3fade865ce84dcf68bcd7de5a5ed1c7d904796e9
Parents: a13be20
Author: Sammi Chen 
Authored: Wed Nov 14 12:58:57 2018 +0800
Committer: Sammi Chen 
Committed: Wed Nov 14 12:58:57 2018 +0800

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  4 --
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 22 --
 .../site/markdown/tools/hadoop-aliyun/index.md  |  5 ++
 .../oss/TestAliyunOSSBlockOutputStream.java | 70 +---
 4 files changed, 83 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fade865/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 4fbb6fb..9c4435c 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -405,7 +405,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 
   ObjectListing objects = store.listObjects(key, maxKeys, null, false);
   while (true) {
-statistics.incrementReadOps(1);
 for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
   String objKey = objectSummary.getKey();
   if (objKey.equals(key + "/")) {
@@ -446,7 +445,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   }
   String nextMarker = objects.getNextMarker();
   objects = store.listObjects(key, maxKeys, nextMarker, false);
-  statistics.incrementReadOps(1);
 } else {
   break;
 }
@@ -694,7 +692,6 @@ public class AliyunOSSFileSystem extends FileSystem {
 new SemaphoredDelegatingExecutor(boundedCopyThreadPool,
 maxConcurrentCopyTasksPerDir, true));
 ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-statistics.incrementReadOps(1);
 // Copy files from src folder to dst
 int copiesToFinish = 0;
 while (true) {
@@ -717,7 +714,6 @@ public class AliyunOSSFileSystem extends FileSystem {
   if (objects.isTruncated()) {
 String nextMarker = objects.getNextMarker();
 objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-statistics.incrementReadOps(1);
   } else {
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fade865/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 7639eb3..4fc1325 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -175,6 +175,7 @@ public class AliyunOSSFileSystemStore {
   CannedAccessControlList cannedACL =
   CannedAccessControlList.valueOf(cannedACLName);
   ossClient.setBucketAcl(bucketName, cannedACL);
+  statistics.incrementWriteOps(1);
 }
 
 maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
@@ -216,6 +217,7 @@ public class AliyunOSSFileSystemStore {
   // Here, we choose the simple mode to do batch delete.
   deleteRequest.setQuiet(true);
   DeleteObjectsResult result = ossClient.deleteObjects(deleteRequest);
+  statistics.incrementWriteOps(1);
   deleteFailed = result.getDeletedObjects();
   tries++;
   if (tries == retry) {
@@ -268,11 +270,13 @@ public class AliyunOSSFileSystemStore {
*/
   public ObjectMetadata getObjectMetadata(String key) {
 try {
-  return ossClient.getObjectMetadata(bucketName, key);
+  ObjectMetadata objectMeta = ossClient.getObjectMetadata(bucketName, key);
+  statistics.incrementReadOps(1);
+  return objectMeta;
 } catch (OSSException osse) {
+  LOG.error("Exception thrown when get object meta: "
+  + 

[16/18] hadoop git commit: HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' commands. Contributed by Ayush Saxena.

2018-11-14 Thread brahma
HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e040e179
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e040e179
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e040e179

Branch: refs/heads/HDFS-13891
Commit: e040e17985853a0ee1926f4ddae7036bbb4e0146
Parents: a948281
Author: Vinayakumar B 
Authored: Fri Oct 12 17:19:55 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 102 ++-
 .../federation/router/TestRouterAdminCLI.java   |  82 ---
 2 files changed, 122 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e040e179/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 1aefe4f..4a9cc7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -151,17 +151,7 @@ public class RouterAdmin extends Configured implements Tool {
* @param arg List of of command line parameters.
*/
   private void validateMax(String[] arg) {
-if (arg[0].equals("-rm")) {
-  if (arg.length > 2) {
-throw new IllegalArgumentException(
-"Too many arguments, Max=1 argument allowed");
-  }
-} else if (arg[0].equals("-ls")) {
-  if (arg.length > 2) {
-throw new IllegalArgumentException(
-"Too many arguments, Max=1 argument allowed");
-  }
-} else if (arg[0].equals("-clrQuota")) {
+if (arg[0].equals("-ls")) {
   if (arg.length > 2) {
 throw new IllegalArgumentException(
 "Too many arguments, Max=1 argument allowed");
@@ -183,63 +173,63 @@ public class RouterAdmin extends Configured implements Tool {
 }
   }
 
-  @Override
-  public int run(String[] argv) throws Exception {
-if (argv.length < 1) {
-  System.err.println("Not enough parameters specified");
-  printUsage();
-  return -1;
-}
-
-int exitCode = -1;
-int i = 0;
-String cmd = argv[i++];
-
-// Verify that we have enough command line parameters
+  /**
+   * Usage: validates the minimum number of arguments for a command.
+   * @param argv List of of command line parameters.
+   * @return true if number of arguments are valid for the command else false.
+   */
+  private boolean validateMin(String[] argv) {
+String cmd = argv[0];
 if ("-add".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-update".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-rm".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-setQuota".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-clrQuota".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-safemode".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-nameservice".equals(cmd)) {
   if (argv.length < 3) {
-System.err.println("Not enough parameters specificed for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 }
+return true;
+  }
+
+  @Override
+  public int run(String[] argv) throws Exception {
+if (argv.length < 1) {
+  System.err.println("Not enough parameters specified");
+   

[14/18] hadoop git commit: HDFS-12284. RBF: Support for Kerberos authentication. Contributed by Sherwood Zheng and Inigo Goiri.

2018-11-14 Thread brahma
HDFS-12284. RBF: Support for Kerberos authentication. Contributed by Sherwood Zheng and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4705b9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4705b9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4705b9b

Branch: refs/heads/HDFS-13891
Commit: c4705b9b2258ecf55f55a96212332fa5aa3ea727
Parents: 7443ed5
Author: Brahma Reddy Battula 
Authored: Wed Nov 7 07:33:37 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml |  10 ++
 .../server/federation/router/RBFConfigKeys.java |  11 ++
 .../hdfs/server/federation/router/Router.java   |  28 
 .../federation/router/RouterAdminServer.java|   7 +
 .../federation/router/RouterHttpServer.java |   5 +-
 .../federation/router/RouterRpcClient.java  |   9 +-
 .../federation/router/RouterRpcServer.java  |  12 ++
 .../src/main/resources/hdfs-rbf-default.xml |  47 ++
 .../fs/contract/router/RouterHDFSContract.java  |   9 +-
 .../fs/contract/router/SecurityConfUtil.java| 156 +++
 .../TestRouterHDFSContractAppendSecure.java |  46 ++
 .../TestRouterHDFSContractConcatSecure.java |  51 ++
 .../TestRouterHDFSContractCreateSecure.java |  48 ++
 .../TestRouterHDFSContractDeleteSecure.java |  46 ++
 ...stRouterHDFSContractGetFileStatusSecure.java |  47 ++
 .../TestRouterHDFSContractMkdirSecure.java  |  48 ++
 .../TestRouterHDFSContractOpenSecure.java   |  47 ++
 .../TestRouterHDFSContractRenameSecure.java |  48 ++
 ...stRouterHDFSContractRootDirectorySecure.java |  63 
 .../TestRouterHDFSContractSeekSecure.java   |  48 ++
 .../TestRouterHDFSContractSetTimesSecure.java   |  48 ++
 .../server/federation/MiniRouterDFSCluster.java |  58 ++-
 22 files changed, 879 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4705b9b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index 386eb41..9f515bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -35,6 +35,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
     <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4705b9b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index bbd4250..fa474f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -242,4 +242,15 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
   FEDERATION_ROUTER_PREFIX + "quota-cache.update.interval";
   public static final long DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT =
   6;
+
+  // HDFS Router security
+  public static final String DFS_ROUTER_KEYTAB_FILE_KEY =
+  FEDERATION_ROUTER_PREFIX + "keytab.file";
+  public static final String DFS_ROUTER_KERBEROS_PRINCIPAL_KEY =
+  FEDERATION_ROUTER_PREFIX + "kerberos.principal";
+  public static final String DFS_ROUTER_KERBEROS_PRINCIPAL_HOSTNAME_KEY =
+  FEDERATION_ROUTER_PREFIX + "kerberos.principal.hostname";
+
+  public static final String DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY =
+  FEDERATION_ROUTER_PREFIX + "kerberos.internal.spnego.principal";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4705b9b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index 5ddc129..3288273 100644
--- 

[18/18] hadoop git commit: HDFS-13834. RBF: Connection creator thread should catch Throwable. Contributed by CR Hota.

2018-11-14 Thread brahma
HDFS-13834. RBF: Connection creator thread should catch Throwable. Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d8cc85c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d8cc85c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d8cc85c

Branch: refs/heads/HDFS-13891
Commit: 4d8cc85c2c13aecdb517a07aac9da038a064ecf6
Parents: 6291f11
Author: Inigo Goiri 
Authored: Wed Nov 14 18:35:12 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:35:12 2018 +0530

--
 .../federation/router/ConnectionManager.java|  4 +-
 .../router/TestConnectionManager.java   | 43 
 2 files changed, 46 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d8cc85c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 9fb83e4..fa2bf94 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -393,7 +393,7 @@ public class ConnectionManager {
   /**
* Thread that creates connections asynchronously.
*/
-  private static class ConnectionCreator extends Thread {
+  static class ConnectionCreator extends Thread {
 /** If the creator is running. */
 private boolean running = true;
 /** Queue to push work to. */
@@ -426,6 +426,8 @@ public class ConnectionManager {
 } catch (InterruptedException e) {
   LOG.error("The connection creator was interrupted");
   this.running = false;
+} catch (Throwable e) {
+  LOG.error("Fatal error caught by connection creator ", e);
 }
   }
 }
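
[Editor's note] The single added catch clause above is the whole fix: the creator thread loops forever draining queued connection pools, and any unchecked exception escaping one iteration (for example from the unresolvable namenode address exercised by the new test below) previously killed the thread, so no further connections were ever created. A minimal, self-contained illustration of the pattern (a generic worker loop, not the actual ConnectionCreator):

public class ResilientWorker {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(() -> {
      for (int i = 1; i <= 5; i++) {
        try {
          if (i == 2) {
            // Simulated per-item failure, e.g. a bad/unresolvable address.
            throw new IllegalArgumentException("cannot resolve namenode");
          }
          System.out.println("handled work item " + i);
        } catch (Throwable t) {
          // Without this catch the thread dies on item 2 and items 3..5 are
          // never processed; with it the loop keeps draining work.
          System.err.println("caught and continuing: " + t);
        }
      }
    });
    worker.start();
    worker.join();
  }
}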

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d8cc85c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java
index 0e1eb40..765f6c8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java
@@ -22,12 +22,17 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.Rule;
+import org.junit.rules.ExpectedException;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -49,6 +54,7 @@ public class TestConnectionManager {
   private static final UserGroupInformation TEST_USER3 =
   UserGroupInformation.createUserForTesting("user3", TEST_GROUP);
   private static final String TEST_NN_ADDRESS = "nn1:8080";
+  private static final String UNRESOLVED_TEST_NN_ADDRESS = "unknownhost:8080";
 
   @Before
   public void setup() throws Exception {
@@ -59,6 +65,9 @@ public class TestConnectionManager {
 connManager.start();
   }
 
+  @Rule
+  public ExpectedException exceptionRule = ExpectedException.none();
+
   @After
   public void shutdown() {
 if (connManager != null) {
@@ -122,6 +131,40 @@ public class TestConnectionManager {
   }
 
   @Test
+  public void testConnectionCreatorWithException() throws Exception {
+// Create a bad connection pool pointing to unresolvable namenode address.
+ConnectionPool badPool = new ConnectionPool(
+conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 0, 10,
+ClientProtocol.class);
+BlockingQueue queue = new ArrayBlockingQueue<>(1);
+queue.add(badPool);
+ConnectionManager.ConnectionCreator 

[01/18] hadoop git commit: HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case of IOException. Contributed by Surendra Singh Lilhore [Forced Update!]

2018-11-14 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-13891 2bd2e6c31 -> 4d8cc85c2 (forced update)


HADOOP-15869. BlockDecompressorStream#decompress should not return -1 in case of IOException. Contributed by Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75291e6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75291e6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75291e6d

Branch: refs/heads/HDFS-13891
Commit: 75291e6d53c13debf45493a870a898b63779914b
Parents: e7b63ba
Author: Surendra Singh Lilhore 
Authored: Tue Nov 13 20:22:58 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Tue Nov 13 20:22:58 2018 +0530

--
 .../io/compress/BlockDecompressorStream.java|  4 +--
 .../compress/TestBlockDecompressorStream.java   | 29 
 2 files changed, 31 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75291e6d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
index 72509c7..de457d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
@@ -71,8 +71,8 @@ public class BlockDecompressorStream extends 
DecompressorStream {
 if (noUncompressedBytes == originalBlockSize) {
   // Get original data size
   try {
-originalBlockSize =  rawReadInt();
-  } catch (IOException ioe) {
+originalBlockSize = rawReadInt();
+  } catch (EOFException e) {
 return -1;
   }
   noUncompressedBytes = 0;
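
The point of the change above is that rawReadInt() uses EOFException to signal a normal end of the compressed stream, whereas any other IOException (for example, missing file blocks) is a real failure that should reach the caller instead of being converted to -1. A minimal, self-contained sketch of the same pattern using java.io.DataInputStream rather than the actual Hadoop stream classes:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class EofVersusIoException {
  // Sums 4-byte ints until the stream ends. EOFException means "no more
  // data" (mirrors returning -1); any other IOException propagates.
  static long sum(DataInputStream in) throws IOException {
    long total = 0;
    while (true) {
      try {
        total += in.readInt();   // throws EOFException at end of stream
      } catch (EOFException eof) {
        return total;            // normal termination
      }
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] twoInts = {0, 0, 0, 1, 0, 0, 0, 2};
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(twoInts));
    System.out.println(sum(in)); // prints 3
  }
}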

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75291e6d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
index c976572..cdab772 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
@@ -18,11 +18,15 @@
 package org.apache.hadoop.io.compress;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.ByteBuffer;
 
 import org.junit.Test;
@@ -74,4 +78,29 @@ public class TestBlockDecompressorStream {
   fail("unexpected IOException : " + e);
 }
   }
+
+  @Test
+  public void testReadWhenIoExceptionOccure() throws IOException {
+File file = new File("testReadWhenIOException");
+try {
+  file.createNewFile();
+  InputStream io = new FileInputStream(file) {
+@Override
+public int read() throws IOException {
+  throw new IOException("File blocks missing");
+}
+  };
+
+  try (BlockDecompressorStream blockDecompressorStream =
+  new BlockDecompressorStream(io, new FakeDecompressor(), 1024)) {
+int byteRead = blockDecompressorStream.read();
+fail("Should not return -1 in case of IOException. Byte read "
++ byteRead);
+  } catch (IOException e) {
+assertTrue(e.getMessage().contains("File blocks missing"));
+  }
+} finally {
+  file.delete();
+}
+  }
 }
\ No newline at end of file





[10/18] hadoop git commit: HDDS-834. Datanode goes OOM based because of segment size. Contributed by Mukul Kumar Singh.

2018-11-14 Thread brahma
HDDS-834. Datanode goes OOM based because of segment size. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9482817
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9482817
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9482817

Branch: refs/heads/HDFS-13891
Commit: a94828170684793b80efdd76dc8a3167e324c0ea
Parents: 3fade86
Author: Shashikant Banerjee 
Authored: Wed Nov 14 15:53:22 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 15:53:22 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  2 +-
 .../common/src/main/resources/ozone-default.xml |  4 +--
 .../server/ratis/ContainerStateMachine.java | 27 +++-
 3 files changed, 24 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index cedcc43..b748d69 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -74,7 +74,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
   "dfs.container.ratis.segment.size";
   public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-  1 * 1024 * 1024 * 1024;
+  16 * 1024;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
   "dfs.container.ratis.segment.preallocated.size";
   public static final int
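
The smaller 16 KB default keeps Ratis segment buffers from exhausting datanode heap, and it can still be overridden per deployment through the key above. A hedged sketch (not part of this commit) of setting the key programmatically; the 1 MB value is purely illustrative:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class SegmentSizeOverride {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Override the new 16 KB default; 1 MB here is only an example value.
    conf.setInt(ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, 1024 * 1024);
    System.out.println(conf.getInt(
        ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
        ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT));
  }
}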

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 54bffd5..e94e7e1 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -175,10 +175,10 @@
   </property>
   <property>
     <name>dfs.container.ratis.segment.size</name>
-    <value>1073741824</value>
+    <value>16384</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The size of the raft segment used by Apache Ratis on datanodes.
-      (1 GB by default)
+      (16 KB by default)
     </description>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9482817/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 3899bde..a3b496a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -120,7 +120,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
   createContainerFutureMap;
   private ExecutorService[] executors;
   private final int numExecutors;
-  private final Map containerCommandCompletionMap;
+  private final Map applyTransactionCompletionMap;
+  private long lastIndex;
   /**
* CSM metrics.
*/
@@ -138,7 +139,8 @@ public class ContainerStateMachine extends BaseStateMachine 
{
 this.executors = executors.toArray(new ExecutorService[numExecutors]);
 this.writeChunkFutureMap = new ConcurrentHashMap<>();
 this.createContainerFutureMap = new ConcurrentHashMap<>();
-containerCommandCompletionMap = new ConcurrentHashMap<>();
+applyTransactionCompletionMap = new ConcurrentHashMap<>();
+this.lastIndex = RaftServerConstants.INVALID_LOG_INDEX;
   }
 
   @Override
@@ -162,10 +164,12 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 
   private long loadSnapshot(SingleFileSnapshotInfo snapshot) {
 if (snapshot == null) {
-  TermIndex empty = TermIndex.newTermIndex(0, 0);
+  TermIndex empty = TermIndex.newTermIndex(0,
+  RaftServerConstants.INVALID_LOG_INDEX);
   LOG.info("The snapshot info is null." +
   "Setting the last applied index to:" + empty);
   setLastAppliedTermIndex(empty);
+  lastIndex = 

[15/18] hadoop git commit: HDFS-14024. RBF: ProvidedCapacityTotal json exception in NamenodeHeartbeatService. Contributed by CR Hota.

2018-11-14 Thread brahma
HDFS-14024. RBF: ProvidedCapacityTotal json exception in 
NamenodeHeartbeatService. Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7443ed5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7443ed5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7443ed5b

Branch: refs/heads/HDFS-13891
Commit: 7443ed5b447126901f6ea8698e5e9315beb3517e
Parents: 97a6e6d
Author: Inigo Goiri 
Authored: Thu Nov 1 11:49:33 2018 -0700
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 .../hdfs/server/federation/router/NamenodeHeartbeatService.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7443ed5b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index a1adf77..1349aa3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -351,7 +351,7 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 jsonObject.getLong("PendingReplicationBlocks"),
 jsonObject.getLong("UnderReplicatedBlocks"),
 jsonObject.getLong("PendingDeletionBlocks"),
-jsonObject.getLong("ProvidedCapacityTotal"));
+jsonObject.optLong("ProvidedCapacityTotal"));
   }
 }
   }
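
The fix works because getLong throws when the namenode's JMX bean does not publish ProvidedCapacityTotal (older releases), while optLong falls back to 0 and lets the heartbeat continue. A small illustration of the two methods' semantics using org.json.JSONObject; the JSON class actually used in NamenodeHeartbeatService may differ, so treat this only as a sketch:

import org.json.JSONException;
import org.json.JSONObject;

public class OptLongVersusGetLong {
  public static void main(String[] args) {
    // A JMX snapshot from a namenode that predates ProvidedCapacityTotal.
    JSONObject bean = new JSONObject("{\"UnderReplicatedBlocks\": 3}");

    // optLong tolerates the missing key and returns 0.
    System.out.println(bean.optLong("ProvidedCapacityTotal"));

    // getLong throws for the same missing key.
    try {
      bean.getLong("ProvidedCapacityTotal");
    } catch (JSONException e) {
      System.out.println("getLong failed: " + e.getMessage());
    }
  }
}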





[03/18] hadoop git commit: HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-14 Thread brahma
HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/671fd652
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/671fd652
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/671fd652

Branch: refs/heads/HDFS-13891
Commit: 671fd6524b2640474de2bc3b8dbaa0a3cf7fcf01
Parents: 75291e6
Author: Shashikant Banerjee 
Authored: Tue Nov 13 23:39:14 2018 +0530
Committer: Shashikant Banerjee 
Committed: Tue Nov 13 23:39:14 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  28 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |  65 ++-
 .../hdds/scm/storage/ChunkOutputStream.java | 448 +++
 .../hdds/scm/XceiverClientAsyncReply.java   |  98 
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  12 +-
 .../scm/storage/ContainerProtocolCalls.java |  57 ++-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  24 +-
 .../common/src/main/resources/ozone-default.xml |  26 +-
 .../keyvalue/impl/BlockManagerImpl.java |   3 +
 .../hadoop/ozone/client/OzoneClientUtils.java   |  27 --
 .../ozone/client/io/ChunkGroupOutputStream.java | 337 +++---
 .../hadoop/ozone/client/rpc/RpcClient.java  |  27 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  45 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  19 +
 .../apache/hadoop/ozone/RatisTestHelper.java|   2 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 252 +++
 .../rpc/TestContainerStateMachineFailures.java  |  20 +-
 .../client/rpc/TestFailureHandlingByClient.java | 213 +
 .../ozone/container/ContainerTestHelper.java|  34 ++
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../ozone/scm/TestXceiverClientMetrics.java |   3 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java |   2 +-
 .../web/storage/DistributedStorageHandler.java  |  42 +-
 .../hadoop/ozone/freon/TestDataValidate.java|   6 +
 .../ozone/freon/TestRandomKeyGenerator.java |   6 +
 25 files changed, 1248 insertions(+), 550 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/671fd652/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index cc34e27..9acd832 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.scm;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -47,6 +48,7 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 /**
  * A Client for the storageContainer protocol.
@@ -163,7 +165,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 // In case the command gets retried on a 2nd datanode,
 // sendCommandAsyncCall will create a new channel and async stub
 // in case these don't exist for the specific datanode.
-responseProto = sendCommandAsync(request, dn).get();
+responseProto = sendCommandAsync(request, dn).getResponse().get();
 if (responseProto.getResult() == ContainerProtos.Result.SUCCESS) {
   break;
 }
@@ -197,13 +199,23 @@ public class XceiverClientGrpc extends XceiverClientSpi {
* @throws IOException
*/
   @Override
-  public CompletableFuture sendCommandAsync(
+  public XceiverClientAsyncReply sendCommandAsync(
   ContainerCommandRequestProto request)
   throws IOException, ExecutionException, InterruptedException {
-return sendCommandAsync(request, pipeline.getFirstNode());
+XceiverClientAsyncReply asyncReply =
+sendCommandAsync(request, pipeline.getFirstNode());
+
+// TODO : for now make this API sync in nature as async requests are
+// served out of order over XceiverClientGrpc. This needs to be fixed
+// if this API is to be used for I/O path. Currently, this is not
+// used for Read/Write Operation but for tests.
+if (!HddsUtils.isReadOnly(request)) {
+ 

hadoop git commit: HADOOP-15926. Document upgrading the section in NOTICE.txt when upgrading the version of AWS SDK. Contributed by Dinesh Chitlangia.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 9f3d42f10 -> 8c9681d7f


HADOOP-15926. Document upgrading the section in NOTICE.txt when upgrading the 
version of AWS SDK. Contributed by Dinesh Chitlangia.

(cherry picked from commit 66b1335bb3a9a6f3a3db455540c973d4a85bef73)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c9681d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c9681d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c9681d7

Branch: refs/heads/branch-3.2
Commit: 8c9681d7f08f9c7cd6bb15b45605be668fad69ef
Parents: 9f3d42f
Author: Akira Ajisaka 
Authored: Thu Nov 15 16:30:24 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 16:31:05 2018 +0900

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c9681d7/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index 31d3a5f..cb07a49 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -1182,6 +1182,7 @@ as it may take a couple of SDK updates before it is ready.
 1. Identify the latest AWS SDK [available for 
download](https://aws.amazon.com/sdk-for-java/).
 1. Create a private git branch of trunk for JIRA, and in
   `hadoop-project/pom.xml` update the `aws-java-sdk.version` to the new SDK 
version.
+1. Update AWS SDK versions in NOTICE.txt.
 1. Do a clean build and rerun all the `hadoop-aws` tests, with and without the 
`-Ds3guard -Ddynamodb` options.
   This includes the `-Pscale` set, with a role defined for the assumed role 
tests.
   in `fs.s3a.assumed.role.arn` for testing assumed roles,





hadoop git commit: HADOOP-15926. Document upgrading the section in NOTICE.txt when upgrading the version of AWS SDK. Contributed by Dinesh Chitlangia.

2018-11-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk df5e863fe -> 66b1335bb


HADOOP-15926. Document upgrading the section in NOTICE.txt when upgrading the 
version of AWS SDK. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66b1335b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66b1335b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66b1335b

Branch: refs/heads/trunk
Commit: 66b1335bb3a9a6f3a3db455540c973d4a85bef73
Parents: df5e863
Author: Akira Ajisaka 
Authored: Thu Nov 15 16:30:24 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 15 16:30:24 2018 +0900

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b1335b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index 31d3a5f..cb07a49 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -1182,6 +1182,7 @@ as it may take a couple of SDK updates before it is ready.
 1. Identify the latest AWS SDK [available for 
download](https://aws.amazon.com/sdk-for-java/).
 1. Create a private git branch of trunk for JIRA, and in
   `hadoop-project/pom.xml` update the `aws-java-sdk.version` to the new SDK 
version.
+1. Update AWS SDK versions in NOTICE.txt.
 1. Do a clean build and rerun all the `hadoop-aws` tests, with and without the 
`-Ds3guard -Ddynamodb` options.
   This includes the `-Pscale` set, with a role defined for the assumed role 
tests.
   in `fs.s3a.assumed.role.arn` for testing assumed roles,





[hadoop] Git Push Summary

2018-11-14 Thread elek
Repository: hadoop
Updated Tags:  refs/tags/ozone-0.3.0-alpha-RC1 [created] cdad29240




[07/18] hadoop git commit: YARN-9001. [Submarine] Use AppAdminClient instead of ServiceClient to submit jobs. (Zac Zhou via wangda)

2018-11-14 Thread brahma
YARN-9001. [Submarine] Use AppAdminClient instead of ServiceClient to submit 
jobs. (Zac Zhou via wangda)

Change-Id: Ic3d6c1e439df9cdf74448b345b925343224efe51


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcbd205c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcbd205c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcbd205c

Branch: refs/heads/HDFS-13891
Commit: fcbd205cc35e7411ac33860c78b9e1e70697bb4a
Parents: 9da6054
Author: Wangda Tan 
Authored: Tue Nov 13 13:13:27 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 13:13:27 2018 -0800

--
 .../yarn/service/client/ServiceClient.java  |  1 +
 .../submarine/runtimes/common/JobMonitor.java   |  6 +++
 .../yarnservice/YarnServiceJobMonitor.java  | 27 +
 .../yarnservice/YarnServiceJobSubmitter.java| 42 ++--
 .../runtimes/yarnservice/YarnServiceUtils.java  | 15 +++
 .../yarnservice/TestYarnServiceRunJobCli.java   | 12 +++---
 .../submarine/common/MockClientContext.java |  1 -
 7 files changed, 80 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbd205c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 0bc5a2c..713d890 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1552,6 +1552,7 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
   LOG.info("Service {} does not have an application ID", serviceName);
   return appSpec;
 }
+appSpec.setId(currentAppId.toString());
 ApplicationReport appReport = 
yarnClient.getApplicationReport(currentAppId);
 appSpec.setState(convertState(appReport.getYarnApplicationState()));
 ApplicationTimeout lifetime =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbd205c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
index c81393b..35e21fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/JobMonitor.java
@@ -48,6 +48,11 @@ public abstract class JobMonitor {
   throws IOException, YarnException;
 
   /**
+   * Cleanup AppAdminClient, etc.
+   */
+  public void cleanup() throws IOException {}
+
+  /**
* Continue wait and print status if job goes to ready or final state.
* @param jobName
* @throws IOException
@@ -80,5 +85,6 @@ public abstract class JobMonitor {
 throw new IOException(e);
   }
 }
+cleanup();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcbd205c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
index fab018a..ee68ddb 100644
--- 

[02/18] hadoop git commit: HDDS-675. Add blocking buffer and use watchApi for flush/close in OzoneClient. Contributed by Shashikant Banerjee.

2018-11-14 Thread brahma
http://git-wip-us.apache.org/repos/asf/hadoop/blob/671fd652/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 43517ae..935423d 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
-import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -27,11 +26,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -55,15 +49,17 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
-import org.slf4j.event.Level;
 
 import java.io.IOException;
-import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.TimeUnit;
+
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
@@ -79,7 +75,6 @@ public class TestCloseContainerHandlingByClient {
   private static String volumeName;
   private static String bucketName;
   private static String keyString;
-  private static int maxRetries;
 
   /**
* Create a MiniDFSCluster for testing.
@@ -91,15 +86,14 @@ public class TestCloseContainerHandlingByClient {
   @BeforeClass
   public static void init() throws Exception {
 conf = new OzoneConfiguration();
-maxRetries = 100;
-conf.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, maxRetries);
-conf.set(OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL, "200ms");
 chunkSize = (int) OzoneConsts.MB;
 blockSize = 4 * chunkSize;
-conf.setInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
+conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
+conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, 
TimeUnit.MILLISECONDS);
+conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+conf.setQuietMode(false);
 conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, (4));
-cluster = MiniOzoneCluster.newBuilder(conf)
-.setNumDatanodes(3).build();
+cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7).build();
 cluster.waitForClusterToBeReady();
 //the easiest way to create an open container is creating a key
 client = OzoneClientFactory.getClient(conf);
@@ -121,44 +115,29 @@ public class TestCloseContainerHandlingByClient {
 }
   }
 
-  private static String fixedLengthString(String string, int length) {
-return String.format("%1$" + length + "s", string);
-  }
-
   @Test
   public void testBlockWritesWithFlushAndClose() throws Exception {
 String keyName = "standalone";
-OzoneOutputStream key =
-createKey(keyName, ReplicationType.STAND_ALONE, 0);
+OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
 // write data more than 1 chunk
-byte[] data =
-fixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+byte[] data = ContainerTestHelper
+.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
 key.write(data);
 
 Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
 //get the name of a valid container
 OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-.setBucketName(bucketName)
-.setType(HddsProtos.ReplicationType.STAND_ALONE)
+.setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
 

[05/18] hadoop git commit: MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun via wangda)

2018-11-14 Thread brahma
MAPREDUCE-7158. Inefficient Flush Logic in JobHistory EventWriter. (Zichen Sun 
via wangda)

Change-Id: I99ace87980da03bb35a8012cea7218d602a8817a


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/762a56cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/762a56cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/762a56cc

Branch: refs/heads/HDFS-13891
Commit: 762a56cc64bc07d57f94e253920534b8e049f238
Parents: 076b795
Author: Wangda Tan 
Authored: Tue Nov 13 11:25:41 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:25:41 2018 -0800

--
 .../java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/762a56cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index 46e4f1a..b07c676 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -93,7 +93,6 @@ public class EventWriter {
 wrapper.setType(event.getEventType());
 wrapper.setEvent(event.getDatum());
 writer.write(wrapper, encoder);
-encoder.flush();
 if (this.jsonOutput) {
   out.writeBytes("\n");
 }





[08/18] hadoop git commit: HADOOP-15876. Use keySet().removeAll() to remove multiple keys from Map in AzureBlobFileSystemStore

2018-11-14 Thread brahma
HADOOP-15876. Use keySet().removeAll() to remove multiple keys from Map in 
AzureBlobFileSystemStore

Contributed by Da Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a13be203
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a13be203
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a13be203

Branch: refs/heads/HDFS-13891
Commit: a13be203b7877ba56ef63aac4a2e65d4e1a4adbc
Parents: fcbd205
Author: Da Zhou 
Authored: Tue Nov 13 21:46:18 2018 +
Committer: Steve Loughran 
Committed: Tue Nov 13 21:46:18 2018 +

--
 .../org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java  | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a13be203/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index bfdbba8..f300a9a 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -678,9 +678,7 @@ public class AzureBlobFileSystemStore {
   }
 }
 
-for (Map.Entry defaultAclEntry : 
defaultAclEntries.entrySet()) {
-  aclEntries.remove(defaultAclEntry.getKey());
-}
+aclEntries.keySet().removeAll(defaultAclEntries.keySet());
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, 
true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);
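
keySet() returns a view backed by the map, so removing keys from the view also removes the corresponding entries, replacing the per-entry loop with one bulk call. A standalone sketch of the idiom with plain HashMaps (not the Azure store's actual types):

import java.util.HashMap;
import java.util.Map;

public class BulkKeyRemoval {
  public static void main(String[] args) {
    Map<String, String> aclEntries = new HashMap<>();
    aclEntries.put("user::rwx", "access");
    aclEntries.put("default:user::rwx", "default");

    Map<String, String> defaultAclEntries = new HashMap<>();
    defaultAclEntries.put("default:user::rwx", "default");

    // keySet() is a live view, so this drops the matching entries from aclEntries.
    aclEntries.keySet().removeAll(defaultAclEntries.keySet());

    System.out.println(aclEntries); // {user::rwx=access}
  }
}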





[13/18] hadoop git commit: HDFS-13845. RBF: The default MountTableResolver should fail resolving multi-destination paths. Contributed by yanghuafeng.

2018-11-14 Thread brahma
HDFS-13845. RBF: The default MountTableResolver should fail resolving 
multi-destination paths. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97a6e6df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97a6e6df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97a6e6df

Branch: refs/heads/HDFS-13891
Commit: 97a6e6df8a54b02ed7afbe8b403f17e3ec360257
Parents: b422fe2
Author: Brahma Reddy Battula 
Authored: Tue Oct 30 11:21:08 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java| 45 
 .../router/TestDisableNameservices.java | 36 ++--
 3 files changed, 70 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97a6e6df/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 121469f..9e69840 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -539,21 +539,28 @@ public class MountTableResolver
* @param entry Mount table entry.
* @return PathLocation containing the namespace, local path.
*/
-  private static PathLocation buildLocation(
-  final String path, final MountTable entry) {
-
+  private PathLocation buildLocation(
+  final String path, final MountTable entry) throws IOException {
 String srcPath = entry.getSourcePath();
 if (!path.startsWith(srcPath)) {
   LOG.error("Cannot build location, {} not a child of {}", path, srcPath);
   return null;
 }
+
+List<RemoteLocation> dests = entry.getDestinations();
+if (getClass() == MountTableResolver.class && dests.size() > 1) {
+  throw new IOException("Cannnot build location, "
+  + getClass().getSimpleName()
+  + " should not resolve multiple destinations for " + path);
+}
+
 String remainingPath = path.substring(srcPath.length());
 if (remainingPath.startsWith(Path.SEPARATOR)) {
   remainingPath = remainingPath.substring(1);
 }
 
 List locations = new LinkedList<>();
-for (RemoteLocation oneDst : entry.getDestinations()) {
+for (RemoteLocation oneDst : dests) {
   String nsId = oneDst.getNameserviceId();
   String dest = oneDst.getDest();
   String newPath = dest;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97a6e6df/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index 5e3b861..14ccb61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -79,6 +79,8 @@ public class TestMountTableResolver {
* __usr
* bin -> 2:/bin
* __readonly -> 2:/tmp
+   * __multi -> 5:/dest1
+   *6:/dest2
*
* @throws IOException If it cannot set the mount table.
*/
@@ -126,6 +128,12 @@ public class TestMountTableResolver {
 MountTable readOnlyEntry = MountTable.newInstance("/readonly", map);
 readOnlyEntry.setReadOnly(true);
 mountTable.addEntry(readOnlyEntry);
+
+// /multi
+map = getMountTableEntry("5", "/dest1");
+map.put("6", "/dest2");
+MountTable multiEntry = MountTable.newInstance("/multi", map);
+mountTable.addEntry(multiEntry);
   }
 
   @Before
@@ -201,6 +209,17 @@ public class TestMountTableResolver {
 }
   }
 
+  @Test
+  public void testMuiltipleDestinations() throws IOException {
+try {
+  mountTable.getDestinationForPath("/multi");
+  fail("The getDestinationForPath call should fail.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(

[12/18] hadoop git commit: HDFS-12284. addendum to HDFS-12284. Contributed by Inigo Goiri.

2018-11-14 Thread brahma
HDFS-12284. addendum to HDFS-12284. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53f85101
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53f85101
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53f85101

Branch: refs/heads/HDFS-13891
Commit: 53f85101f911b960aee6c3eb734a1cab9670b2b2
Parents: c4705b9
Author: Brahma Reddy Battula 
Authored: Wed Nov 7 07:37:02 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53f85101/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index 9f515bc..947c91a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -36,7 +36,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   
 
   org.bouncycastle
-  bcprov-jdk16
+  bcprov-jdk15on
   test
 
 





[11/18] hadoop git commit: HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. Contributed by Akira Ajisaka.

2018-11-14 Thread brahma
HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. 
Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b422fe27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b422fe27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b422fe27

Branch: refs/heads/HDFS-13891
Commit: b422fe27141a342482798a61ee8a721c50a0c4aa
Parents: e040e17
Author: Yiqun Lin 
Authored: Tue Oct 23 14:34:29 2018 +0800
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 .../resolver/FileSubclusterResolver.java|  6 ++-
 .../federation/router/RouterClientProtocol.java | 30 ---
 .../router/RouterQuotaUpdateService.java|  9 ++--
 .../hdfs/server/federation/MockResolver.java| 17 +++---
 .../federation/router/TestRouterMountTable.java | 55 +++-
 .../router/TestRouterRpcMultiDestination.java   |  5 +-
 6 files changed, 97 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b422fe27/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
index 5aa5ec9..6432bb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
@@ -61,8 +61,10 @@ public interface FileSubclusterResolver {
* cache.
*
* @param path Path to get the mount points under.
-   * @return List of mount points present at this path or zero-length list if
-   * none are found.
+   * @return List of mount points present at this path. Return zero-length
+   * list if the path is a mount point but there are no mount points
+   * under the path. Return null if the path is not a mount point
+   * and there are no mount points under the path.
* @throws IOException Throws exception if the data is not available.
*/
   List getMountPoints(String path) throws IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b422fe27/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index ddbc014..de94eaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -718,6 +718,9 @@ public class RouterClientProtocol implements ClientProtocol 
{
   date = dates.get(src);
 }
 ret = getMountPointStatus(src, children.size(), date);
+  } else if (children != null) {
+// The src is a mount point, but there are no files or directories
+ret = getMountPointStatus(src, 0, 0);
   }
 }
 
@@ -1714,13 +1717,26 @@ public class RouterClientProtocol implements 
ClientProtocol {
 FsPermission permission = FsPermission.getDirDefault();
 String owner = this.superUser;
 String group = this.superGroup;
-try {
-  // TODO support users, it should be the user for the pointed folder
-  UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
-  owner = ugi.getUserName();
-  group = ugi.getPrimaryGroupName();
-} catch (IOException e) {
-  LOG.error("Cannot get the remote user: {}", e.getMessage());
+if (subclusterResolver instanceof MountTableResolver) {
+  try {
+MountTableResolver mountTable = (MountTableResolver) 
subclusterResolver;
+MountTable entry = mountTable.getMountPoint(name);
+if (entry != null) {
+  permission = entry.getMode();
+  owner = entry.getOwnerName();
+  group = entry.getGroupName();
+}
+  } catch (IOException e) {
+LOG.error("Cannot get mount point: {}", e.getMessage());
+  }
+} else {
+  try {
+UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
+   

[17/18] hadoop git commit: HDFS-13852. RBF: The DN_REPORT_TIME_OUT and DN_REPORT_CACHE_EXPIRE should be configured in RBFConfigKeys. Contributed by yanghuafeng.

2018-11-14 Thread brahma
HDFS-13852. RBF: The DN_REPORT_TIME_OUT and DN_REPORT_CACHE_EXPIRE should be 
configured in RBFConfigKeys. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6291f11d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6291f11d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6291f11d

Branch: refs/heads/HDFS-13891
Commit: 6291f11dcce604452829af1444df10ea6fc180fa
Parents: 53f8510
Author: Inigo Goiri 
Authored: Tue Nov 13 10:14:35 2018 -0800
Committer: Brahma Reddy Battula 
Committed: Wed Nov 14 18:01:29 2018 +0530

--
 .../federation/metrics/FederationMetrics.java   | 12 +--
 .../federation/metrics/NamenodeBeanMetrics.java | 22 
 .../server/federation/router/RBFConfigKeys.java |  7 +++
 .../src/main/resources/hdfs-rbf-default.xml | 17 +++
 .../router/TestRouterRPCClientRetries.java  |  2 +-
 .../server/federation/router/TestRouterRpc.java |  2 +-
 6 files changed, 40 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6291f11d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 23f62b6..6a0a46e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -47,12 +47,14 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
@@ -95,7 +97,7 @@ public class FederationMetrics implements FederationMBean {
   private static final String DATE_FORMAT = "/MM/dd HH:mm:ss";
 
   /** Prevent holding the page from load too long. */
-  private static final long TIME_OUT = TimeUnit.SECONDS.toMillis(1);
+  private final long timeOut;
 
 
   /** Router interface. */
@@ -143,6 +145,12 @@ public class FederationMetrics implements FederationMBean {
   this.routerStore = stateStore.getRegisteredRecordStore(
   RouterStore.class);
 }
+
+// Initialize the cache for the DN reports
+Configuration conf = router.getConfig();
+this.timeOut = conf.getTimeDuration(RBFConfigKeys.DN_REPORT_TIME_OUT,
+RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS);
+
   }
 
   /**
@@ -434,7 +442,7 @@ public class FederationMetrics implements FederationMBean {
 try {
   RouterRpcServer rpcServer = this.router.getRpcServer();
   DatanodeInfo[] live = rpcServer.getDatanodeReport(
-  DatanodeReportType.LIVE, false, TIME_OUT);
+  DatanodeReportType.LIVE, false, timeOut);
 
   if (live.length > 0) {
 float totalDfsUsed = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6291f11d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index e8ebf0d..da9a927 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -74,21 +74,6 @@ public class NamenodeBeanMetrics
   private 

[04/18] hadoop git commit: YARN-8918. [Submarine] Correct method usage of str.subString in CliUtils. (Zhankun Tang via wangda)

2018-11-14 Thread brahma
YARN-8918. [Submarine] Correct method usage of str.subString in CliUtils. 
(Zhankun Tang via wangda)

Change-Id: Id1f11dbab3aa838dee3a0ec8b8fd5dc32f5dd946


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/076b795b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/076b795b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/076b795b

Branch: refs/heads/HDFS-13891
Commit: 076b795b2e82d7f89cc91e0a8513c7081ee8b930
Parents: 671fd65
Author: Wangda Tan 
Authored: Tue Nov 13 11:24:15 2018 -0800
Committer: Wangda Tan 
Committed: Tue Nov 13 11:24:15 2018 -0800

--
 .../java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/076b795b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
index 05e830f..f85c82a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
@@ -72,7 +72,7 @@ public class CliUtils {
   resourcesStr = resourcesStr.substring(1);
 }
 if (resourcesStr.endsWith("]")) {
-  resourcesStr = resourcesStr.substring(0, resourcesStr.length());
+  resourcesStr = resourcesStr.substring(0, resourcesStr.length() - 1);
 }
 
 for (String resource : resourcesStr.trim().split(",")) {
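
The original call used length() as the end index, which copies the whole string and never strips the trailing ']'; the end index has to be length() - 1. A tiny self-contained check of the old and new behaviour (the resource string below is made up for illustration):

public class SubstringOffByOne {
  public static void main(String[] args) {
    String resourcesStr = "memory=2048,vcores=2]"; // leading "[" already stripped

    // Old code: substring(0, length()) is a no-op, the "]" survives.
    System.out.println(resourcesStr.substring(0, resourcesStr.length()));

    // Fixed code: drop the final character.
    System.out.println(resourcesStr.substring(0, resourcesStr.length() - 1));
  }
}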





hadoop git commit: HDDS-774. Remove OpenContainerBlockMap from datanode. Contributed by Shashikant Banerjee.

2018-11-14 Thread shashikant
Repository: hadoop
Updated Branches:
  refs/heads/trunk a94828170 -> b57cc73f8


HDDS-774. Remove OpenContainerBlockMap from datanode. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b57cc73f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b57cc73f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b57cc73f

Branch: refs/heads/trunk
Commit: b57cc73f837ecb79ed275fc6e50ffce684baf573
Parents: a948281
Author: Shashikant Banerjee 
Authored: Wed Nov 14 20:05:56 2018 +0530
Committer: Shashikant Banerjee 
Committed: Wed Nov 14 20:05:56 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java |  54 +---
 .../common/impl/TestCloseContainerHandler.java  | 261 ---
 .../TestGetCommittedBlockLengthAndPutKey.java   |  71 -
 3 files changed, 2 insertions(+), 384 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57cc73f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index d8c23bf..f970c72 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -54,7 +54,6 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.OpenContainerBlockMap;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import 
org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
@@ -109,7 +108,6 @@ public class KeyValueHandler extends Handler {
   private final VolumeChoosingPolicy volumeChoosingPolicy;
   private final long maxContainerSize;
   private final AutoCloseableLock handlerLock;
-  private final OpenContainerBlockMap openContainerBlockMap;
 
   public KeyValueHandler(Configuration config, ContainerSet contSet,
   VolumeSet volSet, ContainerMetrics metrics) {
@@ -138,21 +136,12 @@ public class KeyValueHandler extends Handler {
 // this handler lock is used for synchronizing createContainer Requests,
 // so using a fair lock here.
 handlerLock = new AutoCloseableLock(new ReentrantLock(true));
-openContainerBlockMap = new OpenContainerBlockMap();
   }
 
   @VisibleForTesting
   public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() {
 return volumeChoosingPolicy;
   }
-  /**
-   * Returns OpenContainerBlockMap instance.
-   *
-   * @return OpenContainerBlockMap
-   */
-  public OpenContainerBlockMap getOpenContainerBlockMap() {
-return openContainerBlockMap;
-  }
 
   @Override
   public ContainerCommandResponseProto handle(
@@ -355,7 +344,6 @@ public class KeyValueHandler extends Handler {
   } else {
 long containerId = kvContainer.getContainerData().getContainerID();
 containerSet.removeContainer(containerId);
-openContainerBlockMap.removeContainer(containerId);
 // Release the lock first.
 // Avoid holding write locks for disk operations
 kvContainer.writeUnlock();
@@ -388,19 +376,11 @@ public class KeyValueHandler extends Handler {
 long containerID = kvContainer.getContainerData().getContainerID();
 try {
   checkContainerOpen(kvContainer);
-  KeyValueContainerData kvData = kvContainer.getContainerData();
-
-  // remove the container from open block map once, all the blocks
-  // have been committed and the container is closed
-  commitPendingBlocks(kvContainer);
-
   // TODO : The close command should move the container to either quasi
   // closed/closed depending upon how the closeContainer gets executed.
   // If it arrives by Standalone, it will be moved to Quasi Closed or
   // otherwise moved to Closed state if it gets executed via Ratis.
   kvContainer.close();
-  // make sure the the container open keys from BlockMap gets removed
-  openContainerBlockMap.removeContainer(kvData.getContainerID());
 } catch (StorageContainerException ex) {
   if (ex.getResult() == CLOSED_CONTAINER_IO) {
 LOG.debug("Container {} is already closed.", containerID);
@@ 

hadoop git commit: YARN-8672. Improve token filename management for localization. Contributed by Chandni Singh

2018-11-14 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk b57cc73f8 -> 21ec4bdae


YARN-8672.  Improve token filename management for localization.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21ec4bda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21ec4bda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21ec4bda

Branch: refs/heads/trunk
Commit: 21ec4bdaef4b68adbbf4f33a6f74494c074f803c
Parents: b57cc73
Author: Eric Yang 
Authored: Wed Nov 14 15:22:01 2018 -0500
Committer: Eric Yang 
Committed: Wed Nov 14 15:22:01 2018 -0500

--
 .../server/nodemanager/ContainerExecutor.java   |  1 +
 .../nodemanager/DefaultContainerExecutor.java   | 10 +++
 .../nodemanager/LinuxContainerExecutor.java |  9 ---
 .../WindowsSecureContainerExecutor.java |  6 ++---
 .../launcher/ContainerLaunch.java   |  4 +--
 .../launcher/ContainerRelaunch.java |  3 +--
 .../localizer/ContainerLocalizer.java   | 28 
 .../localizer/ResourceLocalizationService.java  | 14 +-
 .../TestDefaultContainerExecutor.java   |  9 ---
 .../nodemanager/TestLinuxContainerExecutor.java |  6 ++---
 .../TestLinuxContainerExecutorWithMocks.java|  3 ++-
 .../containermanager/TestContainerManager.java  |  2 +-
 .../launcher/TestContainerLaunch.java   |  2 +-
 .../localizer/TestContainerLocalizer.java   | 11 +---
 14 files changed, 61 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21ec4bda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 77b7859..2ef5725 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -81,6 +81,7 @@ public abstract class ContainerExecutor implements Configurable {
   private static final Logger LOG =
LoggerFactory.getLogger(ContainerExecutor.class);
   protected static final String WILDCARD = "*";
+  public static final String TOKEN_FILE_NAME_FMT = "%s.tokens";
 
   /**
* The permissions to use when creating the launch script.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21ec4bda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index a500c02..a337edd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -164,8 +164,7 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 // randomly choose the local directory
 Path appStorageDir = getWorkingDir(localDirs, user, appId);
 
-String tokenFn =
-String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
+String tokenFn = String.format(TOKEN_FILE_NAME_FMT, locId);
 Path tokenDst = new Path(appStorageDir, tokenFn);
 copyFile(nmPrivateContainerTokensPath, tokenDst, user);
 LOG.info("Copying from " + nmPrivateContainerTokensPath
@@ -180,7 +179,8 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 + localizerFc.getWorkingDirectory());
 
 ContainerLocalizer localizer =
-createContainerLocalizer(user, appId, locId, localDirs, localizerFc);
+createContainerLocalizer(user, appId, locId, tokenFn, localDirs,
+localizerFc);

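The change above centralizes the token file naming: ContainerExecutor now owns TOKEN_FILE_NAME_FMT ("%s.tokens"), and DefaultContainerExecutor formats the name once and hands it down to the localizer. A minimal sketch of that convention follows; the directory layout and container ID are invented for illustration, and java.nio paths stand in for Hadoop's Path and FileContext.

import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative only: shows the "<locId>.tokens" naming convention.
// The storage directory and container ID below are made up.
public class TokenFileNameSketch {

  // Same format string as ContainerExecutor.TOKEN_FILE_NAME_FMT.
  static final String TOKEN_FILE_NAME_FMT = "%s.tokens";

  // Build the destination path for a container's token file inside
  // the application's local storage directory.
  static Path tokenDestination(Path appStorageDir, String locId) {
    String tokenFn = String.format(TOKEN_FILE_NAME_FMT, locId);
    return appStorageDir.resolve(tokenFn);
  }

  public static void main(String[] args) {
    Path appStorageDir = Paths.get("/tmp/nm-local-dir/usercache/alice/appcache/app_01");
    String locId = "container_01_000002";
    // Prints .../app_01/container_01_000002.tokens
    System.out.println(tokenDestination(appStorageDir, locId));
  }
}
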
hadoop git commit: HDFS-14035. NN status discovery does not leverage delegation token. Contributed by Chen Liang.

2018-11-14 Thread cliang
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 8b5277fd1 -> 481b61825


HDFS-14035. NN status discovery does not leverage delegation token. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481b6182
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481b6182
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481b6182

Branch: refs/heads/HDFS-12943
Commit: 481b6182501178d3db7438bb0c38faf20914e276
Parents: 8b5277f
Author: Chen Liang 
Authored: Wed Nov 14 13:32:13 2018 -0800
Committer: Chen Liang 
Committed: Wed Nov 14 13:32:13 2018 -0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 13 +
 .../hadoop/hdfs/NameNodeProxiesClient.java  | 31 ---
 .../hadoop/hdfs/protocol/ClientProtocol.java| 13 -
 .../ClientNamenodeProtocolTranslatorPB.java | 28 ++
 .../ha/AbstractNNFailoverProxyProvider.java | 33 ++--
 .../namenode/ha/IPFailoverProxyProvider.java|  2 +-
 .../namenode/ha/ObserverReadProxyProvider.java  |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto | 10 
 .../hadoop/hdfs/protocol/TestReadOnly.java  |  3 +-
 .../native/libhdfspp/lib/proto/CMakeLists.txt   |  1 +
 .../federation/router/RouterClientProtocol.java |  8 +++
 .../federation/router/RouterRpcServer.java  |  7 +++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 36 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 +++
 .../ha/TestConsistentReadsObserver.java |  3 ++
 .../ha/TestObserverReadProxyProvider.java   | 57 
 16 files changed, 161 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481b6182/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 85ada8d..d96101b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -3195,4 +3196,16 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void msync() throws IOException {
 namenode.msync();
   }
+
+  /**
+   * Get the HA service state of the NameNode.
+   *
+   * @return HA state of NameNode
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public HAServiceProtocol.HAServiceState getHAServiceState()
+  throws IOException {
+return namenode.getHAServiceState();
+  }
 }
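
With getHAServiceState exposed on DFSClient (and, per the file list above, on ClientProtocol), the HA state probe travels over the same RPC path as ordinary client calls, so the client's delegation token can cover it. A hypothetical usage sketch follows; it assumes hadoop-hdfs-client on the classpath, a reachable cluster URI supplied by the caller, and the test-visible DistributedFileSystem#getClient() accessor.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical probe: asks a NameNode for its HA state through the
// client RPC path added by this change. Requires a running cluster;
// pass the filesystem URI (e.g. hdfs://nn-host:8020) as args[0].
public class HAStateProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = URI.create(args[0]);
    try (FileSystem fs = FileSystem.get(uri, conf)) {
      DFSClient client = ((DistributedFileSystem) fs).getClient();
      HAServiceState state = client.getHAServiceState();
      System.out.println("NameNode " + uri + " reports HA state " + state);
    }
  }
}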

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481b6182/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index 265d9dc..c640b39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -25,16 +25,13 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.ipc.AlignmentContext;
-import org.apache.hadoop.ipc.Client;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -352,34 +349,6 @@ public class NameNodeProxiesClient {
 fallbackToSimpleAuth, null);
   }
 
-  /**
-   * Creates a non-HA proxy object with {@link