[hadoop] branch trunk updated: HDDS-1469. Generate default configuration fragments based on annotations

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e2f0f72  HDDS-1469. Generate default configuration fragments based on 
annotations
e2f0f72 is described below

commit e2f0f7267791051b561a6e291a22bbc58c34d068
Author: Márton Elek 
AuthorDate: Thu May 2 12:14:43 2019 +0200

HDDS-1469. Generate default configuration fragments based on annotations

Closes #773
---
 hadoop-hdds/common/pom.xml |   5 +
 .../hadoop/hdds/conf/OzoneConfiguration.java   |  29 -
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  12 --
 .../common/src/main/resources/ozone-default.xml|  20 
 .../hadoop/hdds/conf/SimpleConfiguration.java  |  15 ++-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |   4 +-
 hadoop-hdds/config/pom.xml |  66 +++
 .../java/org/apache/hadoop/hdds/conf/Config.java   |  12 ++
 .../hadoop/hdds/conf/ConfigFileAppender.java   | 127 +
 .../hadoop/hdds/conf/ConfigFileGenerator.java  | 113 ++
 .../org/apache/hadoop/hdds/conf/ConfigGroup.java   |   0
 .../org/apache/hadoop/hdds/conf/ConfigTag.java}|  32 --
 .../org/apache/hadoop/hdds/conf/ConfigType.java|   0
 .../hadoop/hdds/conf/ConfigurationException.java   |   2 +-
 .../org/apache/hadoop/hdds/conf/package-info.java} |  22 +---
 .../services/javax.annotation.processing.Processor |  16 +++
 .../hadoop/hdds/conf/ConfigurationExample.java}|  28 +++--
 .../hadoop/hdds/conf/TestConfigFileAppender.java}  |  34 --
 .../org/apache/hadoop/hdds/conf/package-info.java} |  18 +--
 hadoop-hdds/pom.xml|   7 ++
 .../hdds/scm/container/ReplicationManager.java |  35 +-
 .../hdds/scm/container/TestReplicationManager.java |  14 +++
 .../src/test/resources/core-site.xml   |  24 
 .../src/test/resources/hdfs-site.xml   |  24 
 24 files changed, 547 insertions(+), 112 deletions(-)
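
For context on HDDS-1469: configuration classes carry @Config/@ConfigGroup annotations, and a compile-time annotation processor (ConfigFileGenerator, registered through the javax.annotation.processing.Processor service file listed above) emits the corresponding defaults into an ozone-default-generated.xml fragment; OzoneConfiguration then loads every such fragment it finds on the classpath (see loadDefaults() in the diff below). A minimal sketch of what an annotated configuration class might look like; the annotation attribute names (key, defaultValue, type, description) and the example prefix are assumptions for illustration, not copied from this commit:

    import org.apache.hadoop.hdds.conf.Config;
    import org.apache.hadoop.hdds.conf.ConfigGroup;
    import org.apache.hadoop.hdds.conf.ConfigType;

    // Sketch only: attribute names below are assumed, not quoted from the patch.
    @ConfigGroup(prefix = "hdds.scm.example")
    public class ExampleConfig {

      private long intervalMillis;

      @Config(key = "interval",
          defaultValue = "300000",
          type = ConfigType.LONG,
          description = "How often the example task runs, in milliseconds.")
      public void setIntervalMillis(long intervalMillis) {
        this.intervalMillis = intervalMillis;
      }

      public long getIntervalMillis() {
        return intervalMillis;
      }
    }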

diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 5d1bb52..51560ca 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -37,6 +37,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
 
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-config</artifactId>
+    </dependency>
+
     <dependency>
       <groupId>javax.annotation</groupId>
       <artifactId>javax.annotation-api</artifactId>
       <version>1.2</version>
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index b4dc94a..b32ad63 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hdds.conf;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
 import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;
 import javax.xml.bind.Unmarshaller;
@@ -28,6 +25,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URL;
@@ -36,6 +34,9 @@ import java.util.Enumeration;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
 /**
  * Configuration for ozone.
  */
@@ -47,12 +48,33 @@ public class OzoneConfiguration extends Configuration {
 
   public OzoneConfiguration() {
 OzoneConfiguration.activate();
+loadDefaults();
   }
 
   public OzoneConfiguration(Configuration conf) {
 super(conf);
 //load the configuration from the classloader of the original conf.
 setClassLoader(conf.getClassLoader());
+if (!(conf instanceof OzoneConfiguration)) {
+  loadDefaults();
+}
+  }
+
+  private void loadDefaults() {
+try {
+  //there could be multiple ozone-default-generated.xml files on the
+  // classpath, which are generated by the annotation processor.
+  // Here we add all of them to the list of the available configuration.
+  Enumeration<URL> generatedDefaults =
+  OzoneConfiguration.class.getClassLoader().getResources(
+  "ozone-default-generated.xml");
+  while (generatedDefaults.hasMoreElements()) {
+addResource(generatedDefaults.nextElement());
+  }
+} catch (IOException e) {
+  e.printStackTrace();
+}
+addResource("ozone-site.xml");
   }
 
  public List<Property> readPropertyFromXml(URL url) throws JAXBException {
@@ -265,7 +287,6 @@ public 

[hadoop] branch trunk updated: HADOOP-16183. Use latest Yetus to support ozone specific build process

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6a42745  HADOOP-16183. Use latest Yetus to support ozone specific 
build process
6a42745 is described below

commit 6a427456eb5a0fe64c2696ca298eceb1267cb07b
Author: Márton Elek 
AuthorDate: Thu May 2 16:48:30 2019 +0200

HADOOP-16183. Use latest Yetus to support ozone specific build process

Closes #599
---
 Jenkinsfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 82b4983..11cbb91 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -35,7 +35,7 @@ pipeline {
 DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile"
 YETUS='yetus'
 // Branch or tag name.  Yetus release tags are 'rel/X.Y.Z'
-YETUS_VERSION='rel/0.9.0'
+YETUS_VERSION='rel/0.10.0'
 }
 
 parameters {
@@ -206,4 +206,4 @@ pipeline {
 }
 }
 }
-}
\ No newline at end of file
+}





[hadoop] branch trunk updated: HDDS-1482. Use strongly typed codec implementations for the S3Table

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4605db3  HDDS-1482. Use strongly typed codec implementations for the 
S3Table
4605db3 is described below

commit 4605db369e4315f6d28e6c050acd3f3c6fbec45c
Author: Bharat Viswanadham 
AuthorDate: Thu May 2 13:56:30 2019 +0200

HDDS-1482. Use strongly typed codec implementations for the S3Table

Closes #789
---
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  5 +-
 .../hadoop/ozone/om/S3SecretManagerImpl.java   | 21 ++
 .../hadoop/ozone/om/codec/S3SecretValueCodec.java  | 57 ++
 .../hadoop/ozone/om/helpers/S3SecretValue.java | 20 +
 .../ozone/om/codec/TestS3SecretValueCodec.java | 88 ++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java | 14 ++--
 .../hadoop/ozone/om/S3BucketManagerImpl.java   | 21 ++
 7 files changed, 191 insertions(+), 35 deletions(-)
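
For context on HDDS-1482: instead of Table<byte[], byte[]>, the OM metadata tables are declared with their key and value types, and a per-type codec handles (de)serialization at the RocksDB boundary. A rough sketch of the shape of such a codec, assuming a Codec<T> contract with toPersistedFormat/fromPersistedFormat methods; the actual interface and package in hadoop-hdds may differ:

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
    import org.apache.hadoop.utils.db.Codec;

    // Sketch of a value codec; the Codec contract is assumed here.
    public class S3SecretValueCodecSketch implements Codec<S3SecretValue> {

      @Override
      public byte[] toPersistedFormat(S3SecretValue value) throws IOException {
        // Protobuf still defines the on-disk format; only the Table API is typed.
        return value.getProtobuf().toByteArray();
      }

      @Override
      public S3SecretValue fromPersistedFormat(byte[] raw) throws IOException {
        return S3SecretValue.fromProtobuf(
            OzoneManagerProtocolProtos.S3Secret.parseFrom(raw));
      }
    }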

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index d484e9d..3149b86 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.utils.db.DBStore;
@@ -259,7 +260,7 @@ public interface OMMetadataManager {
* @return Table.
*/
 
-  Table<byte[], byte[]> getS3Table();
+  Table<String, String> getS3Table();
 
   /**
* Returns the DB key name of a multipart upload key in OM metadata store.
@@ -285,7 +286,7 @@ public interface OMMetadataManager {
* Gets the S3 Secrets table.
* @return Table
*/
-  Table<byte[], byte[]> getS3SecretTable();
+  Table<String, S3SecretValue> getS3SecretTable();
 
   /**
* Returns number of rows in a table.  This should not be used for very
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
index 44712d5..c76a757 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
@@ -23,7 +23,6 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.security.OzoneSecurityException;
 import org.apache.logging.log4j.util.Strings;
 import org.slf4j.Logger;
@@ -31,7 +30,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
 import static 
org.apache.hadoop.ozone.security.OzoneSecurityException.ResultCodes.S3_SECRET_NOT_FOUND;
 
 /**
@@ -61,20 +59,17 @@ public class S3SecretManagerImpl implements S3SecretManager 
{
   public S3SecretValue getS3Secret(String kerberosID) throws IOException {
 Preconditions.checkArgument(Strings.isNotBlank(kerberosID),
 "kerberosID cannot be null or empty.");
-byte[] awsAccessKey = kerberosID.getBytes(UTF_8);
 S3SecretValue result = null;
 omMetadataManager.getLock().acquireS3SecretLock(kerberosID);
 try {
-  byte[] s3Secret =
-  omMetadataManager.getS3SecretTable().get(awsAccessKey);
+  S3SecretValue s3Secret =
+  omMetadataManager.getS3SecretTable().get(kerberosID);
   if(s3Secret == null) {
 byte[] secret = OmUtils.getSHADigest();
 result = new S3SecretValue(kerberosID, DigestUtils.sha256Hex(secret));
-omMetadataManager.getS3SecretTable()
-.put(awsAccessKey, result.getProtobuf().toByteArray());
+omMetadataManager.getS3SecretTable().put(kerberosID, result);
   } else {
-result = S3SecretValue.fromProtobuf(
-OzoneManagerProtocolProtos.S3Secret.parseFrom(s3Secret));
+return s3Secret;
   }
 } finally {
   omMetadataManager.getLock().releaseS3SecretLock(kerberosID);
@@ -90,11 +85,10 @@ public class S3SecretManagerImpl implements S3SecretManager 
{
 "awsAccessKeyId cannot be null or empty.");
 LOG.trace("Get secret for awsAccessKey:{}", kerberosID);
 
-byte[] 

[hadoop] branch trunk updated: HDDS-1479. Update S3.md documentation

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3cb1d09  HDDS-1479. Update S3.md documentation
3cb1d09 is described below

commit 3cb1d09b2eb5f75e91b1a90986845f639bc68487
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu May 2 11:48:32 2019 +0200

HDDS-1479. Update S3.md documentation

Closes #791
---
 hadoop-hdds/docs/content/S3.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/docs/content/S3.md b/hadoop-hdds/docs/content/S3.md
index a928164..5a9e4a5 100644
--- a/hadoop-hdds/docs/content/S3.md
+++ b/hadoop-hdds/docs/content/S3.md
@@ -82,8 +82,8 @@ Operation on Objects:
 Endpoint| Status  | Notes
 |-|---
 PUT Object  | implemented |
-GET Object  | implemented | Range headers are not supported
-Multipart Uplad | implemented |Except the listing of the current MultiPartUploads.
+GET Object  | implemented |
+Multipart Upload| implemented | Except the listing of the current MultiPartUploads.
 DELETE Object   | implemented |
 HEAD Object | implemented |
 





[hadoop] branch branch-3.1 updated: YARN-9285: RM UI progress column is of wrong type. Contributed by Ahmed Hussein.

2019-05-02 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 41ffaea  YARN-9285: RM UI progress column is of wrong type. 
Contributed by  Ahmed Hussein.
41ffaea is described below

commit 41ffaea34292aa1011a5f87c336fb12924f2e948
Author: Eric E Payne 
AuthorDate: Thu May 2 19:39:26 2019 +

YARN-9285: RM UI progress column is of wrong type. Contributed by  Ahmed 
Hussein.

(cherry picked from commit b094b94d43a46af9ddb910da24f792b95f614b08)
---
 .../hadoop/yarn/server/webapp/WebPageUtils.java| 24 ++
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index 8ab5de1..06a5f1d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -47,21 +47,27 @@ public class WebPageUtils {
 
   private static String getAppsTableColumnDefs(
   boolean isFairSchedulerPage, boolean isResourceManager) {
+// default progress column index is 11
+String progressIndex = "[11]";
 StringBuilder sb = new StringBuilder();
 sb.append("[\n")
   .append("{'sType':'natural', 'aTargets': [0]")
   .append(", 'mRender': parseHadoopID }")
   .append("\n, {'sType':'num-ignore-str', 'aTargets': [6, 7, 8]")
-  .append(", 'mRender': renderHadoopDate }")
-  .append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 15] 
}")
-  .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
-if (isFairSchedulerPage) {
-  sb.append("[15]");
-} else if (isResourceManager) {
-  sb.append("[17]");
-} else {
-  sb.append("[10]");
+  .append(", 'mRender': renderHadoopDate }");
+if (isResourceManager) {
+  // Update following line if any column added in RM page before column 11
+  sb.append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 
15] }");
+  // set progress column index to 18
+  progressIndex = "[18]";
+} else if (isFairSchedulerPage) {
+  // Update following line if any column added in scheduler page before 
column 11
+  sb.append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 
15] }");
+  // set progress column index to 16
+  progressIndex = "[16]";
 }
+sb.append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
+sb.append(progressIndex);
 sb.append(", 'mRender': parseHadoopProgress }]");
 return sb.toString();
   }





[hadoop] branch trunk updated: HDFS-14453. Improve Bad Sequence Number Error Message. Contributed by Shweta.

2019-05-02 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d6b7609  HDFS-14453. Improve Bad Sequence Number Error Message. 
Contributed by Shweta.
d6b7609 is described below

commit d6b7609c9674c3d0175868d7190293f1925d779b
Author: Shweta 
AuthorDate: Thu May 2 14:24:04 2019 -0700

HDFS-14453. Improve Bad Sequence Number Error Message. Contributed by 
Shweta.

Signed-off-by: Wei-Chiu Chuang 
---
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 54425b1..1a19517 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1152,9 +1152,9 @@ class DataStreamer extends Daemon {
 one = ackQueue.getFirst();
   }
   if (one.getSeqno() != seqno) {
-throw new IOException("ResponseProcessor: Expecting seqno" +
-" for block " + block +
-one.getSeqno() + " but received " + seqno);
+throw new IOException("ResponseProcessor: Expecting seqno " +
+one.getSeqno() + " for block " + block +
+" but received " + seqno);
   }
   isLastPacketInBlock = one.isLastPacketInBlock();
 





[hadoop] branch trunk updated: YARN-9285: RM UI progress column is of wrong type. Contributed by Ahmed Hussein.

2019-05-02 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b094b94  YARN-9285: RM UI progress column is of wrong type. 
Contributed by  Ahmed Hussein.
b094b94 is described below

commit b094b94d43a46af9ddb910da24f792b95f614b08
Author: Eric E Payne 
AuthorDate: Thu May 2 19:39:26 2019 +

YARN-9285: RM UI progress column is of wrong type. Contributed by  Ahmed 
Hussein.
---
 .../hadoop/yarn/server/webapp/WebPageUtils.java| 24 ++
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index 8ab5de1..06a5f1d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -47,21 +47,27 @@ public class WebPageUtils {
 
   private static String getAppsTableColumnDefs(
   boolean isFairSchedulerPage, boolean isResourceManager) {
+// default progress column index is 11
+String progressIndex = "[11]";
 StringBuilder sb = new StringBuilder();
 sb.append("[\n")
   .append("{'sType':'natural', 'aTargets': [0]")
   .append(", 'mRender': parseHadoopID }")
   .append("\n, {'sType':'num-ignore-str', 'aTargets': [6, 7, 8]")
-  .append(", 'mRender': renderHadoopDate }")
-  .append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 15] 
}")
-  .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
-if (isFairSchedulerPage) {
-  sb.append("[15]");
-} else if (isResourceManager) {
-  sb.append("[17]");
-} else {
-  sb.append("[10]");
+  .append(", 'mRender': renderHadoopDate }");
+if (isResourceManager) {
+  // Update following line if any column added in RM page before column 11
+  sb.append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 
15] }");
+  // set progress column index to 18
+  progressIndex = "[18]";
+} else if (isFairSchedulerPage) {
+  // Update following line if any column added in scheduler page before 
column 11
+  sb.append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 
15] }");
+  // set progress column index to 16
+  progressIndex = "[16]";
 }
+sb.append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
+sb.append(progressIndex);
 sb.append(", 'mRender': parseHadoopProgress }]");
 return sb.toString();
   }





[hadoop] branch trunk updated: HDDS-1473. DataNode ID file should be human readable. (#781)

2019-05-02 Thread hanishakoneru
This is an automated email from the ASF dual-hosted git repository.

hanishakoneru pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1df6799  HDDS-1473. DataNode ID file should be human readable. (#781)
1df6799 is described below

commit 1df679985be187ef773daae37816ddf1df2e411a
Author: Siddharth 
AuthorDate: Thu May 2 13:59:15 2019 -0700

HDDS-1473. DataNode ID file should be human readable. (#781)
---
 .../container/common/helpers/ContainerUtils.java   |  45 ++---
 .../container/common/helpers/DatanodeIdYaml.java   | 182 +
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   9 +
 3 files changed, 207 insertions(+), 29 deletions(-)
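
For context on HDDS-1473: the datanode.id file was previously written as serialized protobuf (DatanodeDetailsProto), which is not human readable; the new DatanodeIdYaml helper writes the same details as YAML via snakeyaml (already on the classpath, see the Yaml import kept in ContainerUtils below). A minimal sketch of the YAML-writing idea with placeholder field names; this is not the actual DatanodeIdYaml implementation:

    import java.io.FileWriter;
    import java.io.IOException;
    import java.io.Writer;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.yaml.snakeyaml.Yaml;

    // Illustrative only; the real helper maps DatanodeDetails fields explicitly.
    public final class DatanodeIdYamlSketch {

      private DatanodeIdYamlSketch() {
      }

      /** Dump a map of datanode fields as human readable YAML. */
      public static void write(Map<String, Object> fields, String path)
          throws IOException {
        try (Writer writer = new FileWriter(path)) {
          new Yaml().dump(fields, writer);
        }
      }

      /** Example payload; the field names here are placeholders. */
      public static Map<String, Object> exampleFields() {
        Map<String, Object> fields = new LinkedHashMap<>();
        fields.put("uuid", "11111111-2222-3333-4444-555555555555");
        fields.put("ipAddress", "10.0.0.12");
        fields.put("hostName", "datanode-1.example.com");
        return fields;
      }
    }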

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 355fd7c..770435e 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -18,41 +18,33 @@
 
 package org.apache.hadoop.ozone.container.common.helpers;
 
-import com.google.common.base.Preconditions;
+import static org.apache.commons.io.FilenameUtils.removeExtension;
+import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_CHECKSUM_ERROR;
+import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM;
+import static 
org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-.ContainerCommandResponseProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-.StorageContainerException;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.file.Paths;
 import org.yaml.snakeyaml.Yaml;
 
-import static org.apache.commons.io.FilenameUtils.removeExtension;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-.Result.CONTAINER_CHECKSUM_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-.Result.NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerData
-.CHARSET_ENCODING;
+import com.google.common.base.Preconditions;
 
 /**
  * A set of helper functions to create proper responses.
@@ -211,11 +203,7 @@ public final class ContainerUtils {
 throw new IOException("Unable to create datanode ID directories.");
   }
 }
-try (FileOutputStream out = new FileOutputStream(path)) {
-  HddsProtos.DatanodeDetailsProto proto =
-  datanodeDetails.getProtoBufMessage();
-  proto.writeTo(out);
-}
+DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path);
   }
 
   /**
@@ -230,9 +218,8 @@ public final class ContainerUtils {
 if (!path.exists()) {
   throw new IOException("Datanode ID file not found.");
 }
-try(FileInputStream in = new FileInputStream(path)) {
-  return DatanodeDetails.getFromProtoBuf(
-  HddsProtos.DatanodeDetailsProto.parseFrom(in));
+try {
+  return DatanodeIdYaml.readDatanodeIdFile(path);
 } catch (IOException e) {
   throw new IOException("Failed to parse DatanodeDetails from "
   + path.getAbsolutePath(), e);
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
 

[hadoop] branch branch-3.2 updated: YARN-9285: RM UI progress column is of wrong type. Contributed by Ahmed Hussein.

2019-05-02 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6fce24f  YARN-9285: RM UI progress column is of wrong type. 
Contributed by  Ahmed Hussein.
6fce24f is described below

commit 6fce24fb4028beb9876f3631cd7e9e7a67f2fd72
Author: Eric E Payne 
AuthorDate: Thu May 2 19:39:26 2019 +

YARN-9285: RM UI progress column is of wrong type. Contributed by  Ahmed 
Hussein.

(cherry picked from commit b094b94d43a46af9ddb910da24f792b95f614b08)
---
 .../hadoop/yarn/server/webapp/WebPageUtils.java| 24 ++
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index 8ab5de1..06a5f1d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -47,21 +47,27 @@ public class WebPageUtils {
 
   private static String getAppsTableColumnDefs(
   boolean isFairSchedulerPage, boolean isResourceManager) {
+// default progress column index is 11
+String progressIndex = "[11]";
 StringBuilder sb = new StringBuilder();
 sb.append("[\n")
   .append("{'sType':'natural', 'aTargets': [0]")
   .append(", 'mRender': parseHadoopID }")
   .append("\n, {'sType':'num-ignore-str', 'aTargets': [6, 7, 8]")
-  .append(", 'mRender': renderHadoopDate }")
-  .append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 15] 
}")
-  .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
-if (isFairSchedulerPage) {
-  sb.append("[15]");
-} else if (isResourceManager) {
-  sb.append("[17]");
-} else {
-  sb.append("[10]");
+  .append(", 'mRender': renderHadoopDate }");
+if (isResourceManager) {
+  // Update following line if any column added in RM page before column 11
+  sb.append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 
15] }");
+  // set progress column index to 18
+  progressIndex = "[18]";
+} else if (isFairSchedulerPage) {
+  // Update following line if any column added in scheduler page before 
column 11
+  sb.append("\n, {'sType':'num-ignore-str', 'aTargets': [11, 12, 13, 14, 
15] }");
+  // set progress column index to 16
+  progressIndex = "[16]";
 }
+sb.append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
+sb.append(progressIndex);
 sb.append(", 'mRender': parseHadoopProgress }]");
 return sb.toString();
   }





[hadoop] branch trunk updated: HADOOP-16282. Avoid FileStream to improve performance. Contributed by Ayush Saxena.

2019-05-02 Thread gifuma
This is an automated email from the ASF dual-hosted git repository.

gifuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7a3188d  HADOOP-16282. Avoid FileStream to improve performance. 
Contributed by Ayush Saxena.
7a3188d is described below

commit 7a3188d054481b9bd563e337901e93476303ce7f
Author: Giovanni Matteo Fumarola 
AuthorDate: Thu May 2 12:58:42 2019 -0700

HADOOP-16282. Avoid FileStream to improve performance. Contributed by Ayush 
Saxena.
---
 .../java/org/apache/hadoop/conf/Configuration.java |  4 +--
 .../hadoop/crypto/random/OsSecureRandom.java   |  9 +++---
 .../main/java/org/apache/hadoop/fs/FileUtil.java   | 17 +--
 .../org/apache/hadoop/fs/shell/CopyCommands.java   |  7 +++--
 .../org/apache/hadoop/metrics2/sink/FileSink.java  |  6 ++--
 .../java/org/apache/hadoop/net/TableMapping.java   |  6 ++--
 .../org/apache/hadoop/security/Credentials.java|  4 +--
 .../java/org/apache/hadoop/security/KDiag.java |  4 +--
 .../apache/hadoop/security/LdapGroupsMapping.java  |  5 ++--
 .../hadoop/security/ShellBasedIdMapping.java   |  4 +--
 .../security/alias/LocalJavaKeyStoreProvider.java  |  6 ++--
 .../security/ssl/FileBasedKeyStoresFactory.java|  5 ++--
 .../security/ssl/ReloadingX509TrustManager.java|  5 ++--
 .../main/java/org/apache/hadoop/util/ConfTest.java |  4 +--
 .../org/apache/hadoop/util/FileBasedIPList.java|  4 +--
 .../org/apache/hadoop/util/HostsFileReader.java|  5 ++--
 .../org/apache/hadoop/util/JsonSerialization.java  |  4 +--
 .../main/java/org/apache/hadoop/util/RunJar.java   |  8 ++---
 .../java/org/apache/hadoop/util/SysInfoLinux.java  | 35 --
 .../org/apache/hadoop/util/hash/JenkinsHash.java   |  6 ++--
 .../apache/hadoop/util/TestHostsFileReader.java| 10 +++
 .../hadoop/hdfs/util/CombinedHostsFileReader.java  |  9 +++---
 .../hadoop/hdfs/util/CombinedHostsFileWriter.java  |  7 +++--
 .../fs/http/server/HttpFSAuthenticationFilter.java |  7 +++--
 .../java/org/apache/hadoop/lib/server/Server.java  |  4 +--
 .../hadoop/hdfs/qjournal/server/Journal.java   |  3 +-
 .../hdfs/server/aliasmap/InMemoryAliasMap.java |  5 ++--
 .../datanode/fsdataset/impl/BlockPoolSlice.java|  3 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  3 +-
 .../datanode/fsdataset/impl/FsDatasetUtil.java |  3 +-
 .../hdfs/server/diskbalancer/command/Command.java  |  4 +--
 .../server/namenode/EditLogFileInputStream.java|  4 +--
 .../hadoop/hdfs/server/namenode/FSImageFormat.java |  9 +++---
 .../FSImagePreTransactionalStorageInspector.java   |  5 ++--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  5 ++--
 .../org/apache/hadoop/hdfs/tools/DebugAdmin.java   |  4 +--
 .../OfflineEditsVisitorFactory.java|  5 ++--
 .../OfflineImageReconstructor.java | 12 
 .../offlineImageViewer/OfflineImageViewer.java |  6 ++--
 .../offlineImageViewer/TextWriterImageVisitor.java |  6 ++--
 .../org/apache/hadoop/hdfs/util/MD5FileUtils.java  |  8 ++---
 .../org/apache/hadoop/tools/RegexCopyFilter.java   |  4 +--
 42 files changed, 148 insertions(+), 126 deletions(-)
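
For context on HADOOP-16282: java.io.FileInputStream and FileOutputStream rely on finalize(), which adds garbage-collector pressure on hot paths, whereas the streams returned by java.nio.file.Files do not; the patch therefore replaces the stream constructors with Files.newInputStream/newOutputStream throughout. A small sketch of the preferred pattern (a hypothetical helper, not taken from the patch):

    import java.io.File;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;

    // Hypothetical helper showing the preferred stream-opening pattern.
    public final class StreamOpenExample {

      private StreamOpenExample() {
      }

      public static long countBytes(File file) throws IOException {
        // Files.newInputStream avoids the finalize() cost of FileInputStream
        // while still returning a plain InputStream.
        try (InputStream in = Files.newInputStream(file.toPath())) {
          long total = 0;
          byte[] buffer = new byte[8192];
          int read;
          while ((read = in.read(buffer)) != -1) {
            total += read;
          }
          return total;
        }
      }
    }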

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 6251cd4..c30ce0d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -30,7 +30,6 @@ import java.io.BufferedInputStream;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -43,6 +42,7 @@ import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -3075,7 +3075,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   LOG.debug("parsing File " + file);
 }
 reader = (XMLStreamReader2)parse(new BufferedInputStream(
-new FileInputStream(file)), ((Path)resource).toString(),
+Files.newInputStream(file.toPath())), ((Path) resource).toString(),
 isRestricted);
   }
 } else if (resource instanceof InputStream) {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
index 6671591..8e191b5 100644
--- 

[hadoop] branch trunk updated: HDFS-14460. DFSUtil#getNamenodeWebAddr should return HTTPS address based on policy configured. Contributed by CR Hota.

2019-05-02 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 865c328  HDFS-14460. DFSUtil#getNamenodeWebAddr should return HTTPS 
address based on policy configured. Contributed by CR Hota.
865c328 is described below

commit 865c3289308327788f3bed355864c510deb40956
Author: Inigo Goiri 
AuthorDate: Thu May 2 10:09:21 2019 -0700

HDFS-14460. DFSUtil#getNamenodeWebAddr should return HTTPS address based on 
policy configured. Contributed by CR Hota.
---
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  | 12 ++---
 .../java/org/apache/hadoop/hdfs/TestDFSUtil.java   | 31 ++
 2 files changed, 39 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 6a21dfc..aa643c8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1329,11 +1329,15 @@ public class DFSUtil {
   nsId = getOnlyNameServiceIdOrNull(conf);
 }
 
+String webAddrBaseKey = DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+String webAddrDefault = DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+if (getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY) {
+  webAddrBaseKey = DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+  webAddrDefault = DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
+}
 String webAddrKey = DFSUtilClient.concatSuffixes(
-DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId);
-
-String webAddr =
-conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
+webAddrBaseKey, nsId, nnId);
+String webAddr = conf.get(webAddrKey, webAddrDefault);
 return webAddr;
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index afe34ad..358edb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
@@ -70,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import 
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.alias.CredentialProvider;
@@ -87,6 +90,8 @@ public class TestDFSUtil {
   static final String NS1_NN_ADDR= "ns1-nn.example.com:8020";
   static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:8020";
   static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:8020";
+  static final String NS1_NN1_HTTPS_ADDR   = "ns1-nn1.example.com:50740";
+  static final String NS1_NN1_HTTP_ADDR= "ns1-nn1.example.com:50070";
 
   /**
* Reset to default UGI settings since some tests change them.
@@ -466,6 +471,32 @@ public class TestDFSUtil {
   }
 
   @Test
+  public void testGetNamenodeWebAddr() {
+HdfsConfiguration conf = new HdfsConfiguration();
+
+conf.set(DFSUtil.addKeySuffixes(
+DFS_NAMENODE_HTTPS_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HTTPS_ADDR);
+conf.set(DFSUtil.addKeySuffixes(
+DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HTTP_ADDR);
+
+conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+String httpsOnlyWebAddr = DFSUtil.getNamenodeWebAddr(
+conf, "ns1", "nn1");
+assertEquals(NS1_NN1_HTTPS_ADDR, httpsOnlyWebAddr);
+
+conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTP_ONLY.name());
+String httpOnlyWebAddr = DFSUtil.getNamenodeWebAddr(
+conf, "ns1", "nn1");
+assertEquals(NS1_NN1_HTTP_ADDR, 

[hadoop] branch HDFS-13891 updated (aeb3b61 -> 893c708)

2019-05-02 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a change to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 discard aeb3b61  HDFS-14457. RBF: Add order text SPACE in CLI command 'hdfs 
dfsrouteradmin'. Contributed by luhuachao.
 discard 55f2f7a  HDFS-13972. RBF: Support for Delegation Token (WebHDFS). 
Contributed by CR Hota.
 discard bd3161e  HDFS-14422. RBF: Router shouldn't allow READ operations in 
safe mode. Contributed by Inigo Goiri.
 discard e508ab9  HDFS-14369. RBF: Fix trailing / for webhdfs. Contributed by 
Akira Ajisaka.
 discard 007b8ea  HDFS-13853. RBF: RouterAdmin update cmd is overwriting the 
entry not updating the existing. Contributed by Ayush Saxena.
 discard 2577e3e  HDFS-14316. RBF: Support unavailable subclusters for mount 
points with multiple destinations. Contributed by Inigo Goiri.
 discard d10765d  HDFS-14388. RBF: Prevent loading metric system when disabled. 
Contributed by Inigo Goiri.
 discard 0b0c334  HDFS-14351. RBF: Optimize configuration item resolving for 
monitor namenode. Contributed by He Xiaoqiao and Inigo Goiri.
 discard ba429bc2 HDFS-14343. RBF: Fix renaming folders spread across multiple 
subclusters. Contributed by Ayush Saxena.
 discard 64ad3d6  HDFS-14334. RBF: Use human readable format for long numbers 
in the Router UI. Contributed by Inigo Goiri.
 discard 4a21db8  HDFS-14335. RBF: Fix heartbeat typos in the Router. 
Contributed by CR Hota.
 discard e6eacbd  HDFS-14331. RBF: IOE While Removing Mount Entry. Contributed 
by Ayush Saxena.
 discard 5634f14  HDFS-14329. RBF: Add maintenance nodes to federation metrics. 
Contributed by Ayush Saxena.
 discard 3e01881  HDFS-14259. RBF: Fix safemode message for Router. Contributed 
by Ranith Sadar.
 discard 58c5457  HDFS-14322. RBF: Security manager should not load if security 
is disabled. Contributed by CR Hota.
 discard 9d809a2  HDFS-14052. RBF: Use Router keytab for WebHDFS. Contributed 
by CR Hota.
 discard d897cee  HDFS-14307. RBF: Update tests to use internal Whitebox 
instead of Mockito. Contributed by CR Hota.
 discard f330e6f  HDFS-14249. RBF: Tooling to identify the subcluster location 
of a file. Contributed by Inigo Goiri.
 discard 1761b90  HDFS-14268. RBF: Fix the location of the DNs in 
getDatanodeReport(). Contributed by Inigo Goiri.
 discard 8478112  HDFS-14226. RBF: Setting attributes should set on all 
subclusters' directories. Contributed by Ayush Saxena.
 discard 49d489c  HDFS-13358. RBF: Support for Delegation Token (RPC). 
Contributed by CR Hota.
 discard 7bbe35e  HDFS-14230. RBF: Throw RetriableException instead of 
IOException when no namenodes available. Contributed by Fei Hui.
 discard 4585b97  HDFS-14252. RBF : Exceptions are exposing the actual sub 
cluster path. Contributed by Ayush Saxena.
 discard 08a4e69  HDFS-14225. RBF : MiniRouterDFSCluster should configure the 
failover proxy provider for namespace. Contributed by Ranith Sardar.
 discard e43400c  HDFS-13404. RBF: 
TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fails.
 discard 4feb3ae  HDFS-14215. RBF: Remove dependency on availability of default 
namespace. Contributed by Ayush Saxena.
 discard 8dc059d  HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() 
in case of multiple destinations. Contributed by Ayush Saxena.
 discard 29fa6ae  HDFS-14223. RBF: Add configuration documents for using 
multiple sub-clusters. Contributed by Takanobu Asanuma.
 discard 3864de4  HDFS-14209. RBF: setQuota() through router is working for 
only the mount Points under the Source column in MountTable. Contributed by 
Shubham Dewan.
 discard 4257376  HDFS-14156. RBF: rollEdit() command fails with Router. 
Contributed by Shubham Dewan.
 discard 9c22816  HDFS-14193. RBF: Inconsistency with the Default Namespace. 
Contributed by Ayush Saxena.
 discard 7c01d25  HDFS-14129. addendum to HDFS-14129. Contributed by Ranith 
Sardar.
 discard e86634a  HDFS-14129. RBF: Create new policy provider for router. 
Contributed by Ranith Sardar.
 discard 692c581  HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo 
Goiri.
 discard 03d98f5  HDFS-13856. RBF: RouterAdmin should support dfsrouteradmin 
-refreshRouterArgs command. Contributed by yanghuafeng.
 discard 64689a2  HDFS-14191. RBF: Remove hard coded router status from 
FederationMetrics. Contributed by Ranith Sardar.
 discard 2a7998b  HDFS-14150. RBF: Quotas of the sub-cluster should be removed 
when removing the mount point. Contributed by Takanobu Asanuma.
 discard 1372cc8  HDFS-14161. RBF: Throw StandbyException instead of 
IOException so that client can retry when can not get connection. Contributed 
by Fei Hui.
 discard 2b48aa0  HDFS-14167. RBF: Add stale nodes to federation metrics. 
Contributed by Inigo Goiri.
 discard e3ab7a4  HDFS-13443. RBF: Update mount table cache immediately after 
changing (add/update/remove) mount table entries. Contributed by Mohammad 
Arshad.
 discard aa6cff0  HDFS-14151. RBF: 

[hadoop] branch HDFS-13891 updated (893c708 -> 40963f9)

2019-05-02 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a change to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 discard 893c708  HDFS-14457. RBF: Add order text SPACE in CLI command 'hdfs 
dfsrouteradmin'. Contributed by luhuachao.
 discard 91fea6f  HDFS-13972. RBF: Support for Delegation Token (WebHDFS). 
Contributed by CR Hota.
 discard e8c6c20  HDFS-14422. RBF: Router shouldn't allow READ operations in 
safe mode. Contributed by Inigo Goiri.
 discard 940c0c7  HDFS-14369. RBF: Fix trailing / for webhdfs. Contributed by 
Akira Ajisaka.
 discard 9b2e8d4  HDFS-13853. RBF: RouterAdmin update cmd is overwriting the 
entry not updating the existing. Contributed by Ayush Saxena.
 discard de204c3  HDFS-14316. RBF: Support unavailable subclusters for mount 
points with multiple destinations. Contributed by Inigo Goiri.
 discard b0bc109  HDFS-14388. RBF: Prevent loading metric system when disabled. 
Contributed by Inigo Goiri.
 discard 09f20d0  HDFS-14351. RBF: Optimize configuration item resolving for 
monitor namenode. Contributed by He Xiaoqiao and Inigo Goiri.
 discard de22e9b  HDFS-14343. RBF: Fix renaming folders spread across multiple 
subclusters. Contributed by Ayush Saxena.
 discard 364811a  HDFS-14334. RBF: Use human readable format for long numbers 
in the Router UI. Contributed by Inigo Goiri.
 discard 9e39186  HDFS-14335. RBF: Fix heartbeat typos in the Router. 
Contributed by CR Hota.
 discard 64ca9c0  HDFS-14331. RBF: IOE While Removing Mount Entry. Contributed 
by Ayush Saxena.
 discard baa2cb1  HDFS-14329. RBF: Add maintenance nodes to federation metrics. 
Contributed by Ayush Saxena.
 discard 0ed0226  HDFS-14259. RBF: Fix safemode message for Router. Contributed 
by Ranith Sadar.
 discard 9204211  HDFS-14322. RBF: Security manager should not load if security 
is disabled. Contributed by CR Hota.
 discard 12924f2  HDFS-14052. RBF: Use Router keytab for WebHDFS. Contributed 
by CR Hota.
 discard 8223879  HDFS-14307. RBF: Update tests to use internal Whitebox 
instead of Mockito. Contributed by CR Hota.
 discard 42886a0  HDFS-14249. RBF: Tooling to identify the subcluster location 
of a file. Contributed by Inigo Goiri.
 discard 35a7e46  HDFS-14268. RBF: Fix the location of the DNs in 
getDatanodeReport(). Contributed by Inigo Goiri.
 discard f63b6a2  HDFS-14226. RBF: Setting attributes should set on all 
subclusters' directories. Contributed by Ayush Saxena.
 discard a0138b8  HDFS-13358. RBF: Support for Delegation Token (RPC). 
Contributed by CR Hota.
 discard 92cd6e2  HDFS-14230. RBF: Throw RetriableException instead of 
IOException when no namenodes available. Contributed by Fei Hui.
 discard b3b157e  HDFS-14252. RBF : Exceptions are exposing the actual sub 
cluster path. Contributed by Ayush Saxena.
 discard d6aeb7c  HDFS-14225. RBF : MiniRouterDFSCluster should configure the 
failover proxy provider for namespace. Contributed by Ranith Sardar.
 discard c14bd63  HDFS-13404. RBF: 
TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fails.
 discard 4d7cf87  HDFS-14215. RBF: Remove dependency on availability of default 
namespace. Contributed by Ayush Saxena.
 discard 6fe8017  HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() 
in case of multiple destinations. Contributed by Ayush Saxena.
 discard 9335ec5  HDFS-14223. RBF: Add configuration documents for using 
multiple sub-clusters. Contributed by Takanobu Asanuma.
 discard a9aadbb  HDFS-14209. RBF: setQuota() through router is working for 
only the mount Points under the Source column in MountTable. Contributed by 
Shubham Dewan.
 discard 2e50862  HDFS-14156. RBF: rollEdit() command fails with Router. 
Contributed by Shubham Dewan.
 discard 17b77c9  HDFS-14193. RBF: Inconsistency with the Default Namespace. 
Contributed by Ayush Saxena.
 discard 2d8ebf5  HDFS-14129. addendum to HDFS-14129. Contributed by Ranith 
Sardar.
 discard 50cb4a2  HDFS-14129. RBF: Create new policy provider for router. 
Contributed by Ranith Sardar.
 discard a48c057  HDFS-14206. RBF: Cleanup quota modules. Contributed by Inigo 
Goiri.
 discard b1d250f  HDFS-13856. RBF: RouterAdmin should support dfsrouteradmin 
-refreshRouterArgs command. Contributed by yanghuafeng.
 discard d90d20f  HDFS-14191. RBF: Remove hard coded router status from 
FederationMetrics. Contributed by Ranith Sardar.
 discard d3370f3  HDFS-14150. RBF: Quotas of the sub-cluster should be removed 
when removing the mount point. Contributed by Takanobu Asanuma.
 discard 45895e6  HDFS-14161. RBF: Throw StandbyException instead of 
IOException so that client can retry when can not get connection. Contributed 
by Fei Hui.
 discard 2828b09  HDFS-14167. RBF: Add stale nodes to federation metrics. 
Contributed by Inigo Goiri.
 discard 00f4bb2  HDFS-13443. RBF: Update mount table cache immediately after 
changing (add/update/remove) mount table entries. Contributed by Mohammad 
Arshad.
 discard cce6ae5  HDFS-14151. RBF: Make 

[hadoop] branch HDFS-13891 updated: HDFS-14454. RBF: getContentSummary() should allow non-existing folders. Contributed by Inigo Goiri.

2019-05-02 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
 new 206b082  HDFS-14454. RBF: getContentSummary() should allow 
non-existing folders. Contributed by Inigo Goiri.
206b082 is described below

commit 206b08292beb9eaed323d76ad5050e9cb289f1e3
Author: Ayush Saxena 
AuthorDate: Fri May 3 04:54:09 2019 +0530

HDFS-14454. RBF: getContentSummary() should allow non-existing folders. 
Contributed by Inigo Goiri.
---
 .../server/federation/router/RemoteResult.java |  84 ++
 .../federation/router/RouterClientProtocol.java|  65 ---
 .../server/federation/router/RouterRpcClient.java  |  79 ++---
 .../server/federation/FederationTestUtils.java | 128 ++
 .../hdfs/server/federation/MockNamenode.java   | 109 
 .../federation/router/TestRouterFaultTolerant.java | 186 -
 .../router/TestRouterMissingFolderMulti.java   | 182 
 7 files changed, 670 insertions(+), 163 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java
new file mode 100644
index 000..2fbcf42
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteResult.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+
+/**
+ * Result from a remote location.
+ * It includes the exception if there was any error.
+ * @param <T> Type of the remote location.
+ * @param <R> Type of the result.
+ */
+public class RemoteResult<T, R> {
+  /** The remote location. */
+  private final T loc;
+  /** The result from the remote location. */
+  private final R result;
+  /** If the result is set; used for void types. */
+  private final boolean resultSet;
+  /** The exception if we couldn't get the result. */
+  private final IOException ioe;
+
+  public RemoteResult(T location, R r) {
+this.loc = location;
+this.result = r;
+this.resultSet = true;
+this.ioe = null;
+  }
+
+  public RemoteResult(T location, IOException e) {
+this.loc = location;
+this.result = null;
+this.resultSet = false;
+this.ioe = e;
+  }
+
+  public T getLocation() {
+return loc;
+  }
+
+  public boolean hasResult() {
+return resultSet;
+  }
+
+  public R getResult() {
+return result;
+  }
+
+  public boolean hasException() {
+return getException() != null;
+  }
+
+  public IOException getException() {
+return ioe;
+  }
+
+  @Override
+  public String toString() {
+StringBuilder sb = new StringBuilder()
+.append("loc=").append(getLocation());
+if (hasResult()) {
+  sb.append(" result=").append(getResult());
+}
+if (hasException()) {
+  sb.append(" exception=").append(getException());
+}
+return sb.toString();
+  }
+}
\ No newline at end of file
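
A short usage sketch for the new RemoteResult type: callers iterate the per-location results and decide how to treat individual failures, instead of one missing folder aborting the whole getContentSummary() call. The helper below and its String/Long type arguments are placeholders, not the router's real types:

    import java.util.List;

    import org.apache.hadoop.hdfs.server.federation.router.RemoteResult;

    // Placeholder types: String stands in for a remote location, Long for a size.
    public final class RemoteResultExample {

      private RemoteResultExample() {
      }

      static long sumSizes(List<RemoteResult<String, Long>> results) {
        long total = 0;
        for (RemoteResult<String, Long> result : results) {
          if (result.hasException()) {
            // One failing subcluster no longer aborts the aggregate result.
            continue;
          }
          if (result.hasResult()) {
            total += result.getResult();
          }
        }
        return total;
      }
    }
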
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 6039083..f1f1c42 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -728,9 +728,9 @@ public class RouterClientProtocol implements ClientProtocol 
{
 RemoteMethod method = new RemoteMethod("getListing",
 new Class[] {String.class, startAfter.getClass(), boolean.class},
 new RemoteParam(), startAfter, needLocation);
-Map listings =
-rpcClient.invokeConcurrent(locations, method,
-!this.allowPartialList, false, 

[hadoop] branch trunk updated: HADOOP-16059. Use SASL Factories Cache to Improve Performance. Contributed by Ayush Saxena.

2019-05-02 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f1875b2  HADOOP-16059. Use SASL Factories Cache to Improve 
Performance. Contributed by Ayush Saxena.
f1875b2 is described below

commit f1875b205e492ef071e7ef78b147efee0e51263d
Author: Vinayakumar B 
AuthorDate: Fri May 3 11:22:14 2019 +0530

HADOOP-16059. Use SASL Factories Cache to Improve Performance. Contributed 
by Ayush Saxena.
---
 .../hadoop/security/FastSaslClientFactory.java | 80 ++
 .../hadoop/security/FastSaslServerFactory.java | 78 +
 .../org/apache/hadoop/security/SaslRpcClient.java  | 12 +++-
 .../org/apache/hadoop/security/SaslRpcServer.java  | 58 ++--
 .../datatransfer/sasl/SaslParticipant.java | 26 ++-
 5 files changed, 196 insertions(+), 58 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslClientFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslClientFactory.java
new file mode 100644
index 000..d5259d3
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslClientFactory.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslClientFactory;
+import javax.security.sasl.SaslException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Class for dealing with caching SASL client factories.
+ */
+@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+public class FastSaslClientFactory implements SaslClientFactory {
+  private final Map<String, List<SaslClientFactory>> factoryCache =
+  new HashMap<String, List<SaslClientFactory>>();
+
+  public FastSaslClientFactory(Map<String, ?> props) {
+final Enumeration<SaslClientFactory> factories =
+Sasl.getSaslClientFactories();
+while (factories.hasMoreElements()) {
+  SaslClientFactory factory = factories.nextElement();
+  for (String mech : factory.getMechanismNames(props)) {
+if (!factoryCache.containsKey(mech)) {
+  factoryCache.put(mech, new ArrayList<SaslClientFactory>());
+}
+factoryCache.get(mech).add(factory);
+  }
+}
+  }
+
+  @Override
+  public String[] getMechanismNames(Map<String, ?> props) {
+return factoryCache.keySet().toArray(new String[0]);
+  }
+
+  @Override
+  public SaslClient createSaslClient(String[] mechanisms,
+  String authorizationId, String protocol, String serverName,
+  Map<String, ?> props, CallbackHandler cbh) throws SaslException {
+for (String mechanism : mechanisms) {
+  List<SaslClientFactory> factories = factoryCache.get(mechanism);
+  if (factories != null) {
+for (SaslClientFactory factory : factories) {
+  SaslClient saslClient =
+  factory.createSaslClient(new String[] {mechanism},
+  authorizationId, protocol, serverName, props, cbh);
+  if (saslClient != null) {
+return saslClient;
+  }
+}
+  }
+}
+return null;
+  }
+}
\ No newline at end of file
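
For context, a minimal usage sketch of the new factory (not part of the patch; the mechanism, protocol and host names are illustrative). The provider scan done by Sasl.getSaslClientFactories() now happens once, in the constructor, while each later createSaslClient() call only consults the in-memory cache:

// Illustrative sketch only -- not part of this commit.
import java.util.HashMap;
import java.util.Map;

import javax.security.auth.callback.CallbackHandler;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;

import org.apache.hadoop.security.FastSaslClientFactory;

public class CachedSaslClientExample {
  // Built once and reused: the expensive provider enumeration runs only in
  // the FastSaslClientFactory constructor, not on every SASL negotiation.
  private static final Map<String, String> PROPS = new HashMap<>();
  private static final FastSaslClientFactory FACTORY =
      new FastSaslClientFactory(PROPS);

  public static SaslClient newClient(CallbackHandler cbh)
      throws SaslException {
    // Mechanism, protocol and server name are placeholders.
    return FACTORY.createSaslClient(new String[] {"DIGEST-MD5"},
        null, "hdfs", "nn.example.com", PROPS, cbh);
  }
}
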
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslServerFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslServerFactory.java
new file mode 100644
index 000..79519d4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslServerFactory.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ 

[hadoop] branch trunk updated: HDDS-1412. Provide example k8s deployment files as part of the release package

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2ab318b  HDDS-1412. Provide example k8s deployment files as part of 
the release package
2ab318b is described below

commit 2ab318b6036d260ac0752f3e7c410594db92d5fe
Author: Márton Elek 
AuthorDate: Thu May 2 11:10:06 2019 +0200

HDDS-1412. Provide example k8s deployment files as part of the release 
package

Closes #719
---
 hadoop-ozone/common/pom.xml| 24 +++
 .../dist/dev-support/bin/dist-layout-stitching |  2 +
 hadoop-ozone/dist/pom.xml  | 77 +-
 hadoop-ozone/dist/{ => src/main}/Dockerfile|  2 +-
 .../ozone/config.yaml} |  4 +-
 .../ozone/datanode-ds.yaml}| 35 --
 .../ozone/definitions/prometheus.yaml} | 25 ---
 .../ozone/flekszible.yaml} |  5 +-
 .../k8s/definitions/ozone/freon/flekszible.yaml}   | 11 +---
 .../ozone/freon/freon.yaml}| 32 +++--
 .../ozone/om-ss-service.yaml}  | 14 ++--
 .../ozone/om-ss.yaml}  | 49 +-
 .../ozone/s3g-ss-service.yaml} | 10 ++-
 .../ozone/s3g-ss.yaml} | 21 ++
 .../ozone/scm-ss-service.yaml} |  2 -
 .../ozone/scm-ss.yaml} | 43 
 .../definitions/ozone/transformations/config.yaml} | 17 +++--
 .../ozone/transformations/emptydir.yaml}   | 73 +++-
 .../minikube/LICENSE.header}   |  4 --
 .../minikube}/config-configmap.yaml| 26 
 .../minikube}/datanode-service.yaml|  6 +-
 .../minikube}/datanode-statefulset.yaml| 42 +++-
 .../examples/minikube/flekszible/flekszible.yaml   | 45 +
 .../minikube/freon/freon-deployment.yaml}  | 40 +--
 .../minikube}/om-public-service.yaml   |  7 +-
 .../{ozone => examples/minikube}/om-service.yaml   |  7 +-
 .../minikube}/om-statefulset.yaml  | 63 +-
 .../minikube}/s3g-public-service.yaml  |  7 +-
 .../{ozone => examples/minikube}/s3g-service.yaml  |  7 +-
 .../minikube}/s3g-statefulset.yaml | 32 -
 .../minikube}/scm-public-service.yaml  |  7 +-
 .../{ozone => examples/minikube}/scm-service.yaml  |  7 +-
 .../minikube}/scm-statefulset.yaml | 64 +-
 .../ozone/LICENSE.header}  |  4 --
 .../k8s/{ => examples}/ozone/config-configmap.yaml | 26 
 .../ozone/datanode-daemonset.yaml} | 46 +++--
 .../k8s/examples/ozone/flekszible/flekszible.yaml  | 43 
 .../ozone/freon/freon-deployment.yaml} | 41 ++--
 .../ozone/om-service.yaml} | 15 +++--
 .../k8s/{ => examples}/ozone/om-statefulset.yaml   | 69 ++-
 .../main/k8s/{ => examples}/ozone/s3g-service.yaml |  7 +-
 .../k8s/{ => examples}/ozone/s3g-statefulset.yaml  | 32 -
 .../main/k8s/{ => examples}/ozone/scm-service.yaml |  7 +-
 .../k8s/{ => examples}/ozone/scm-statefulset.yaml  | 64 +-
 .../dist/src/main/k8s/prometheus/configmap.yaml| 51 --
 .../k8s/prometheus/prometheus-public-serivce.yaml  | 28 
 .../dist/src/main/k8s/prometheus/role.yaml | 31 -
 .../dist/src/main/k8s/prometheus/rolebindng.yaml   | 27 
 .../main/k8s/prometheus/scm-public-service.yaml| 29 
 49 files changed, 642 insertions(+), 688 deletions(-)

diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index efea791..6098b4d 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -169,4 +169,28 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
   
+  <profiles>
+    <profile>
+      <id>k8s-dev</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>io.fabric8</groupId>
+            <artifactId>docker-maven-plugin</artifactId>
+            <version>0.29.0</version>
+            <configuration>
+              <images>
+                <image>
+                  <name>${user.name}/ozone:${project.version}</name>
+                  <build>
+                    <dockerFileDir>${project.basedir}</dockerFileDir>
+                  </build>
+                </image>
+              </images>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>
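
The profile added above is presumably activated with the standard Maven profile switch (command shown only as an illustration, it is not part of the commit):

    mvn clean package -DskipTests -Pk8s-dev

which would build a local ${user.name}/ozone:${project.version} image through the fabric8 docker-maven-plugin declared in the profile.
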
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching 
b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index ffb469b..0ce4e8f 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -120,3 +120,5 @@ cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./
 run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" .
 run cp -p -r 

[hadoop] branch trunk updated: HDDS-1478. Provide k8s resources files for prometheus and performance tests

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f1673b0  HDDS-1478. Provide k8s resources files for prometheus and 
performance tests
f1673b0 is described below

commit f1673b0db136acb99c8735a1cce9c30ad6c3e15a
Author: Márton Elek 
AuthorDate: Thu May 2 11:23:44 2019 +0200

HDDS-1478. Provide k8s resources files for prometheus and performance tests

Closes #783
---
 .../prometheus.yaml => jaeger/flekszible.yaml} | 12 +---
 .../prometheus.yaml => jaeger/jaeger.yaml} | 46 ++---
 .../definitions/{prometheus.yaml => profiler.yaml} |  7 +-
 .../definitions/ozone/definitions/prometheus.yaml  |  2 +-
 .../definitions/{prometheus.yaml => tracing.yaml}  | 21 --
 .../main/k8s/definitions/prometheus/configmap.yaml | 49 ++
 .../definitions/enable.yaml}   |  6 +-
 .../prometheus.yaml => prometheus/deployment.yaml} | 40 ---
 .../prometheus.yaml => prometheus/flekszible.yaml} | 12 +---
 .../prometheus.yaml => prometheus/role.yaml}   | 29 +---
 .../rolebinding.yaml}  | 23 ---
 .../service-account.yaml}  | 15 ++--
 .../prometheus.yaml => prometheus/service.yaml}| 21 +++---
 .../ozone-dev/LICENSE.header}  | 11 ---
 .../k8s/examples/ozone-dev/config-configmap.yaml   | 37 ++
 .../k8s/examples/ozone-dev/datanode-daemonset.yaml | 63 +
 .../ozone-dev/flekszible/flekszible.yaml}  | 32 ++---
 .../ozone-dev/freon/freon-deployment.yaml} | 47 ++---
 .../ozone-dev/jaeger-public-service.yaml}  | 24 ---
 .../ozone-dev/jaeger-service.yaml} | 24 ---
 .../ozone-dev/jaeger-statefulset.yaml} | 36 +++---
 .../ozone-dev/om-public-service.yaml}  | 24 ---
 .../ozone-dev/om-service.yaml} | 24 ---
 .../k8s/examples/ozone-dev/om-statefulset.yaml | 79 ++
 .../ozone-dev/prometheus-clusterrole.yaml} | 38 ---
 .../ozone-dev/prometheus-deployment.yaml}  | 42 +---
 .../prometheus-operator-clusterrolebinding.yaml}   | 24 ---
 .../prometheus-operator-serviceaccount.yaml}   | 16 ++---
 .../ozone-dev/prometheus-service.yaml} | 22 +++---
 .../ozone-dev/prometheusconf-configmap.yaml| 50 ++
 .../ozone-dev/s3g-public-service.yaml} | 24 ---
 .../ozone-dev/s3g-service.yaml}| 24 ---
 .../k8s/examples/ozone-dev/s3g-statefulset.yaml| 58 
 .../ozone-dev/scm-public-service.yaml} | 24 ---
 .../ozone-dev/scm-service.yaml}| 24 ---
 .../k8s/examples/ozone-dev/scm-statefulset.yaml| 74 
 36 files changed, 821 insertions(+), 283 deletions(-)

diff --git 
a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml 
b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml
similarity index 78%
copy from 
hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml
copy to hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml
index 581481f..8fdc155 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml
@@ -13,14 +13,4 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-name: ozone/prometheus
-description: Enable prometheus monitoring in Ozone

-- type: Add
-  trigger:
-metadata:
-  name: config
-  path:
-- data
-  value:
-OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: true
+description: Jaeger tracing server
diff --git 
a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml 
b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml
similarity index 53%
copy from 
hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml
copy to hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml
index 581481f..4796092 100644
--- 
a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml
@@ -13,14 +13,42 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-name: ozone/prometheus
-description: Enable prometheus monitoring in Ozone
+apiVersion: v1
+kind: Service
+metadata:
+  name: jaeger
+spec:
+  clusterIP: None
+  selector:
+    app: jaeger
+    component: jaeger
+  ports:
+    - name: ui
+  

[hadoop] branch trunk updated: HDDS-1468. Inject configuration values to Java objects

2019-05-02 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2887f5  HDDS-1468. Inject configuration values to Java objects
a2887f5 is described below

commit a2887f5c23a695e74bb7693207e9240c8b94d8cf
Author: Márton Elek 
AuthorDate: Thu May 2 11:35:01 2019 +0200

HDDS-1468. Inject configuration values to Java objects

Closes #772
---
 .../java/org/apache/hadoop/hdds/conf/Config.java   |  47 +
 .../org/apache/hadoop/hdds/conf/ConfigGroup.java   |  32 ++
 .../org/apache/hadoop/hdds/conf/ConfigType.java|  34 +++
 .../hadoop/hdds/conf/ConfigurationException.java   |  34 +++
 .../hadoop/hdds/conf/OzoneConfiguration.java   | 109 -
 .../hadoop/hdds/conf/SimpleConfiguration.java  |  83 
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |  32 ++
 .../hdds/scm/container/ReplicationManager.java |  72 +-
 .../hdds/scm/server/StorageContainerManager.java   |  10 +-
 .../hdds/scm/container/TestReplicationManager.java |   8 +-
 .../hdds/scm/safemode/TestSafeModeHandler.java |   7 +-
 11 files changed, 438 insertions(+), 30 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java
new file mode 100644
index 000..2d1e18a
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Mark field to be configurable from ozone-site.xml.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+public @interface Config {
+
+  /**
+   * Configuration fragment relative to the prefix defined with @ConfigGroup.
+   */
+  String key();
+
+  /**
+   * Type of configuration. Use AUTO to decide it based on the java type.
+   */
+  ConfigType type() default ConfigType.AUTO;
+
+  /**
+   * If type == TIME the unit should be defined with this attribute.
+   */
+  TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
+}
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
new file mode 100644
index 000..dd24ccb
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Mark pojo which holds configuration variables.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface ConfigGroup {
+  String prefix();
+}
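
To illustrate how @ConfigGroup and @Config are meant to be combined, a hypothetical configuration POJO could look like the sketch below (class name and keys are made up, not taken from the patch); values for the composed keys would be injected into the annotated setters when the object is populated from an OzoneConfiguration:

// Hypothetical example -- class name and configuration keys are
// illustrative only.  With the prefix below, the annotated setters map to
// ozone.scm.client.address and ozone.scm.client.wait.
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigType;

@ConfigGroup(prefix = "ozone.scm.client")
public class ScmClientConfig {

  private String address;
  private long waitTimeMillis;

  @Config(key = "address")
  public void setAddress(String address) {
    this.address = address;
  }

  @Config(key = "wait", type = ConfigType.TIME,
      timeUnit = TimeUnit.MILLISECONDS)
  public void setWaitTime(long waitTimeMillis) {
    this.waitTimeMillis = waitTimeMillis;
  }

  public String getAddress() {
    return address;
  }

  public long getWaitTime() {
    return waitTimeMillis;
  }
}
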
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
new file mode