This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new e2f0f72  HDDS-1469. Generate default configuration fragments based on 
annotations
e2f0f72 is described below

commit e2f0f7267791051b561a6e291a22bbc58c34d068
Author: Márton Elek <e...@apache.org>
AuthorDate: Thu May 2 12:14:43 2019 +0200

    HDDS-1469. Generate default configuration fragments based on annotations
    
    Closes #773
---
 hadoop-hdds/common/pom.xml                         |   5 +
 .../hadoop/hdds/conf/OzoneConfiguration.java       |  29 ++++-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  12 --
 .../common/src/main/resources/ozone-default.xml    |  20 ----
 .../hadoop/hdds/conf/SimpleConfiguration.java      |  15 ++-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |   4 +-
 hadoop-hdds/config/pom.xml                         |  66 +++++++++++
 .../java/org/apache/hadoop/hdds/conf/Config.java   |  12 ++
 .../hadoop/hdds/conf/ConfigFileAppender.java       | 127 +++++++++++++++++++++
 .../hadoop/hdds/conf/ConfigFileGenerator.java      | 113 ++++++++++++++++++
 .../org/apache/hadoop/hdds/conf/ConfigGroup.java   |   0
 .../org/apache/hadoop/hdds/conf/ConfigTag.java}    |  32 ++++--
 .../org/apache/hadoop/hdds/conf/ConfigType.java    |   0
 .../hadoop/hdds/conf/ConfigurationException.java   |   2 +-
 .../org/apache/hadoop/hdds/conf/package-info.java} |  22 +---
 .../services/javax.annotation.processing.Processor |  16 +++
 .../hadoop/hdds/conf/ConfigurationExample.java}    |  28 +++--
 .../hadoop/hdds/conf/TestConfigFileAppender.java}  |  34 ++++--
 .../org/apache/hadoop/hdds/conf/package-info.java} |  18 +--
 hadoop-hdds/pom.xml                                |   7 ++
 .../hdds/scm/container/ReplicationManager.java     |  35 +++++-
 .../hdds/scm/container/TestReplicationManager.java |  14 +++
 .../src/test/resources/core-site.xml               |  24 ++++
 .../src/test/resources/hdfs-site.xml               |  24 ++++
 24 files changed, 547 insertions(+), 112 deletions(-)

diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 5d1bb52..51560ca 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -37,6 +37,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
   <dependencies>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-config</artifactId>
+    </dependency>
+
+    <dependency>
       <groupId>javax.annotation</groupId>
       <artifactId>javax.annotation-api</artifactId>
       <version>1.2</version>
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index b4dc94a..b32ad63 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hdds.conf;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
 import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;
 import javax.xml.bind.Unmarshaller;
@@ -28,6 +25,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.URL;
@@ -36,6 +34,9 @@ import java.util.Enumeration;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
 /**
  * Configuration for ozone.
  */
@@ -47,12 +48,33 @@ public class OzoneConfiguration extends Configuration {
 
   public OzoneConfiguration() {
     OzoneConfiguration.activate();
+    loadDefaults();
   }
 
   public OzoneConfiguration(Configuration conf) {
     super(conf);
     //load the configuration from the classloader of the original conf.
     setClassLoader(conf.getClassLoader());
+    if (!(conf instanceof OzoneConfiguration)) {
+      loadDefaults();
+    }
+  }
+
+  private void loadDefaults() {
+    try {
+      //there could be multiple ozone-default-generated.xml files on the
+      // classpath, which are generated by the annotation processor.
+      // Here we add all of them to the list of the available configuration.
+      Enumeration<URL> generatedDefaults =
+          OzoneConfiguration.class.getClassLoader().getResources(
+              "ozone-default-generated.xml");
+      while (generatedDefaults.hasMoreElements()) {
+        addResource(generatedDefaults.nextElement());
+      }
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    addResource("ozone-site.xml");
   }
 
   public List<Property> readPropertyFromXml(URL url) throws JAXBException {
@@ -265,7 +287,6 @@ public class OzoneConfiguration extends Configuration {
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
     Configuration.addDefaultResource("ozone-default.xml");
-    Configuration.addDefaultResource("ozone-site.xml");
   }
 
   /**
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 4dc60b3..ccfd7ca 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -348,18 +348,6 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
       "10m";
 
-  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL =
-      "hdds.scm.replication.thread.interval";
-
-  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL_DEFAULT =
-      "5m";
-
-  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT =
-      "hdds.scm.replication.event.timeout";
-
-  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT_DEFAULT =
-      "10m";
-
   public static final String
       HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY =
       "hdds.scm.http.kerberos.principal";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 2cab7f3..6aafb58 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2386,26 +2386,6 @@
     </description>
   </property>
   <property>
-    <name>hdds.scm.replication.thread.interval</name>
-    <value>5m</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      There is a replication monitor thread running inside SCM which
-      takes care of replicating the containers in the cluster. This
-      property is used to configure the interval in which that thread
-      runs.
-    </description>
-  </property>
-  <property>
-    <name>hdds.scm.replication.event.timeout</name>
-    <value>10m</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      Timeout for the container replication/deletion commands sent
-      to datanodes. After this timeout the command will be retried.
-    </description>
-  </property>
-  <property>
     <name>hdds.tracing.enabled</name>
     <value>true</value>
     <tag>OZONE, HDDS</tag>
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
index ac696b3..f18fd5e 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
@@ -35,28 +35,33 @@ public class SimpleConfiguration {
 
   private long waitTime = 1;
 
-  @Config(key = "address")
+  @Config(key = "address", defaultValue = "localhost", description = "Just "
+      + "for testing", tags = ConfigTag.MANAGEMENT)
   public void setClientAddress(String clientAddress) {
     this.clientAddress = clientAddress;
   }
 
-  @Config(key = "bind.host")
+  @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Just "
+      + "for testing", tags = ConfigTag.MANAGEMENT)
   public void setBindHost(String bindHost) {
     this.bindHost = bindHost;
   }
 
-  @Config(key = "enabled")
+  @Config(key = "enabled", defaultValue = "true", description = "Just for "
+      + "testing", tags = ConfigTag.MANAGEMENT)
   public void setEnabled(boolean enabled) {
     this.enabled = enabled;
   }
 
-  @Config(key = "port")
+  @Config(key = "port", defaultValue = "9878", description = "Just for "
+      + "testing", tags = ConfigTag.MANAGEMENT)
   public void setPort(int port) {
     this.port = port;
   }
 
   @Config(key = "wait", type = ConfigType.TIME, timeUnit =
-      TimeUnit.SECONDS)
+      TimeUnit.SECONDS, defaultValue = "10m", description = "Just for "
+      + "testing", tags = ConfigTag.MANAGEMENT)
   public void setWaitTime(long waitTime) {
     this.waitTime = waitTime;
   }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
index bf8ac04..0a80478 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
@@ -124,8 +124,8 @@ public class TestOzoneConfiguration {
     SimpleConfiguration configuration =
         ozoneConfiguration.getObject(SimpleConfiguration.class);
 
-    Assert.assertEquals(false, configuration.isEnabled());
-    Assert.assertEquals(9860, configuration.getPort());
+    Assert.assertEquals(true, configuration.isEnabled());
+    Assert.assertEquals(9878, configuration.getPort());
   }
 
 
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
new file mode 100644
index 0000000..075f587
--- /dev/null
+++ b/hadoop-hdds/config/pom.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0";
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdds</artifactId>
+    <version>0.5.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-hdds-config</artifactId>
+  <version>0.5.0-SNAPSHOT</version>
+  <description>Apache Hadoop Distributed Data Store Config Tools</description>
+  <name>Apache Hadoop HDDS Config</name>
+  <packaging>jar</packaging>
+
+  <properties>
+
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <executions>
+        <execution>
+          <id>default-compile</id>
+          <phase>compile</phase>
+          <goals>
+            <goal>compile</goal>
+          </goals>
+          <configuration>
+            <!-- don't need to activate annotation processor (which may not be 
available yet) for compilation -->
+            <compilerArgument>-proc:none</compilerArgument>
+          </configuration>
+        </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
similarity index 90%
rename from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java
rename to 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
index 2d1e18a..70aa58d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/Config.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
@@ -36,6 +36,16 @@ public @interface Config {
   String key();
 
   /**
+   * Default value to use if not set.
+   */
+  String defaultValue();
+
+  /**
+   * Custom description as a help.
+   */
+  String description();
+
+  /**
    * Type of configuration. Use AUTO to decide it based on the java type.
    */
   ConfigType type() default ConfigType.AUTO;
@@ -44,4 +54,6 @@ public @interface Config {
    * If type == TIME the unit should be defined with this attribute.
    */
   TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
+
+  ConfigTag[] tags();
 }
diff --git 
a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
new file mode 100644
index 0000000..9463f42
--- /dev/null
+++ 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.InputStream;
+import java.io.Writer;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Simple DOM based config file writer.
+ * <p>
+ * This class can init/load existing ozone-default-generated.xml fragments
+ * and append new entries and write to the file system.
+ */
+public class ConfigFileAppender {
+
+  private Document document;
+
+  private final DocumentBuilder builder;
+
+  public ConfigFileAppender() {
+    try {
+      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+      builder = factory.newDocumentBuilder();
+    } catch (Exception ex) {
+      throw new ConfigurationException("Can initialize new configuration", ex);
+    }
+  }
+
+  /**
+   * Initialize a new ozone-site.xml structure with empty content.
+   */
+  public void init() {
+    try {
+      document = builder.newDocument();
+      document.appendChild(document.createElement("configuration"));
+    } catch (Exception ex) {
+      throw new ConfigurationException("Can initialize new configuration", ex);
+    }
+  }
+
+  /**
+   * Load existing ozone-site.xml content and parse the DOM tree.
+   */
+  public void load(InputStream stream) {
+    try {
+      document = builder.parse(stream);
+    } catch (Exception ex) {
+      throw new ConfigurationException("Can't load existing configuration", 
ex);
+    }
+  }
+
+  /**
+   * Add configuration fragment.
+   */
+  public void addConfig(String key, String defaultValue, String description,
+      ConfigTag[] tags) {
+    Element root = document.getDocumentElement();
+    Element propertyElement = document.createElement("property");
+
+    addXmlElement(propertyElement, "name", key);
+
+    addXmlElement(propertyElement, "value", defaultValue);
+
+    addXmlElement(propertyElement, "description", description);
+
+    String tagsAsString = Arrays.stream(tags).map(tag -> tag.name())
+        .collect(Collectors.joining(", "));
+
+    addXmlElement(propertyElement, "tag", tagsAsString);
+
+    root.appendChild(propertyElement);
+  }
+
+  private void addXmlElement(Element parentElement, String tagValue,
+      String textValue) {
+    Element element = document.createElement(tagValue);
+    element.appendChild(document.createTextNode(textValue));
+    parentElement.appendChild(element);
+  }
+
+  /**
+   * Write out the XML content to a writer.
+   */
+  public void write(Writer writer) {
+    try {
+      TransformerFactory transformerFactory = TransformerFactory.newInstance();
+      Transformer transf = transformerFactory.newTransformer();
+
+      transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
+      transf.setOutputProperty(OutputKeys.INDENT, "yes");
+      transf
+          .setOutputProperty("{http://xml.apache.org/xslt}indent-amount";, "2");
+
+      transf.transform(new DOMSource(document), new StreamResult(writer));
+    } catch (TransformerException e) {
+      throw new ConfigurationException("Can't write the configuration xml", e);
+    }
+  }
+}
diff --git 
a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
new file mode 100644
index 0000000..e9e88a0
--- /dev/null
+++ 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import javax.annotation.processing.AbstractProcessor;
+import javax.annotation.processing.Filer;
+import javax.annotation.processing.RoundEnvironment;
+import javax.annotation.processing.SupportedAnnotationTypes;
+import javax.lang.model.element.Element;
+import javax.lang.model.element.ElementKind;
+import javax.lang.model.element.TypeElement;
+import javax.tools.Diagnostic.Kind;
+import javax.tools.FileObject;
+import javax.tools.StandardLocation;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.charset.StandardCharsets;
+import java.util.Set;
+
+/**
+ * Annotation processor to generate config fragments from Config annotations.
+ */
+@SupportedAnnotationTypes("org.apache.hadoop.hdds.conf.ConfigGroup")
+public class ConfigFileGenerator extends AbstractProcessor {
+
+  public static final String OUTPUT_FILE_NAME = "ozone-default-generated.xml";
+
+  @Override
+  public boolean process(Set<? extends TypeElement> annotations,
+      RoundEnvironment roundEnv) {
+    if (roundEnv.processingOver()) {
+      return false;
+    }
+
+    Filer filer = processingEnv.getFiler();
+
+    try {
+
+      //load existing generated config (if exists)
+      ConfigFileAppender appender = new ConfigFileAppender();
+      try (InputStream input = filer
+          .getResource(StandardLocation.CLASS_OUTPUT, "",
+              OUTPUT_FILE_NAME).openInputStream()) {
+        appender.load(input);
+      } catch (FileNotFoundException ex) {
+        appender.init();
+      }
+
+      Set<? extends Element> annotatedElements =
+          roundEnv.getElementsAnnotatedWith(ConfigGroup.class);
+      for (Element annotatedElement : annotatedElements) {
+        TypeElement configGroup = (TypeElement) annotatedElement;
+
+        //check if any of the setters are annotated with @Config
+        for (Element element : configGroup.getEnclosedElements()) {
+          if (element.getKind() == ElementKind.METHOD) {
+            processingEnv.getMessager()
+                .printMessage(Kind.WARNING, 
element.getSimpleName().toString());
+            if (element.getSimpleName().toString().startsWith("set")
+                && element.getAnnotation(Config.class) != null) {
+
+              //update the ozone-site-generated.xml
+              Config configAnnotation = element.getAnnotation(Config.class);
+              ConfigGroup configGroupAnnotation =
+                  configGroup.getAnnotation(ConfigGroup.class);
+
+              String key = configGroupAnnotation.prefix() + "."
+                  + configAnnotation.key();
+
+              appender.addConfig(key,
+                  configAnnotation.defaultValue(),
+                  configAnnotation.description(),
+                  configAnnotation.tags());
+            }
+          }
+
+        }
+        FileObject resource = filer
+            .createResource(StandardLocation.CLASS_OUTPUT, "",
+                OUTPUT_FILE_NAME);
+
+        try (Writer writer = new OutputStreamWriter(
+            resource.openOutputStream(), StandardCharsets.UTF_8)) {
+          appender.write(writer);
+        }
+      }
+    } catch (IOException e) {
+      processingEnv.getMessager().printMessage(Kind.ERROR,
+          "Can't generate the config file from annotation: " + e.getMessage());
+    }
+    return false;
+  }
+
+
+}
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
similarity index 100%
rename from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
rename to 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
similarity index 72%
copy from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
copy to 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
index 23a8104..de50d2a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
+++ 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
@@ -18,17 +18,27 @@
 package org.apache.hadoop.hdds.conf;
 
 /**
- * Possible type of injected configuration.
+ * Available config tags.
  * <p>
- * AUTO means that the exact type will be identified based on the java type of
- * the configuration field.
+ * Note: the values are defined in ozone-default.xml by hadoop.tags.custom.
  */
-public enum ConfigType {
-  AUTO,
-  STRING,
-  BOOLEAN,
-  INT,
-  LONG,
-  TIME,
-  SIZE
+public enum ConfigTag {
+  OZONE,
+  MANAGEMENT,
+  SECURITY,
+  PERFORMANCE,
+  DEBUG,
+  CLIENT,
+  SERVER,
+  OM,
+  SCM,
+  CRITICAL,
+  RATIS,
+  CONTAINER,
+  REQUIRED,
+  REST,
+  STORAGE,
+  PIPELINE,
+  STANDALONE,
+  S3GATEWAY
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
similarity index 100%
copy from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
copy to 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
similarity index 95%
copy from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
copy to 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
index 9c6b213..2e68012 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
+++ 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdds.conf;
 
 /**
- * Exeception to throw in case of a configuration problem.
+ * Exception to throw in case of a configuration problem.
  */
 public class ConfigurationException extends RuntimeException {
   public ConfigurationException() {
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
similarity index 72%
copy from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
copy to 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
index 23a8104..e789040 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
+++ 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -6,29 +6,17 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.conf;
 
 /**
- * Possible type of injected configuration.
- * <p>
- * AUTO means that the exact type will be identified based on the java type of
- * the configuration field.
+ * Generic configuration annotations, tools and generators.
  */
-public enum ConfigType {
-  AUTO,
-  STRING,
-  BOOLEAN,
-  INT,
-  LONG,
-  TIME,
-  SIZE
-}
+package org.apache.hadoop.hdds.conf;
diff --git 
a/hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
 
b/hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
new file mode 100644
index 0000000..f29efda
--- /dev/null
+++ 
b/hadoop-hdds/config/src/main/resources/META-INF/services/javax.annotation.processing.Processor
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.hdds.conf.ConfigFileGenerator
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
 
b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
similarity index 63%
copy from 
hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
copy to 
hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
index ac696b3..2dd2669 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
+++ 
b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java
@@ -23,40 +23,46 @@ import java.util.concurrent.TimeUnit;
  * Example configuration to test the configuration injection.
  */
 @ConfigGroup(prefix = "ozone.scm.client")
-public class SimpleConfiguration {
+public class ConfigurationExample {
 
   private String clientAddress;
 
   private String bindHost;
 
-  private boolean enabled;
+  private boolean compressionEnabled;
 
   private int port = 1234;
 
   private long waitTime = 1;
 
-  @Config(key = "address")
+  @Config(key = "address", defaultValue = "localhost", description = "Client "
+      + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT)
   public void setClientAddress(String clientAddress) {
     this.clientAddress = clientAddress;
   }
 
-  @Config(key = "bind.host")
+  @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind "
+      + "host(To test string injection).", tags = ConfigTag.MANAGEMENT)
   public void setBindHost(String bindHost) {
     this.bindHost = bindHost;
   }
 
-  @Config(key = "enabled")
-  public void setEnabled(boolean enabled) {
-    this.enabled = enabled;
+  @Config(key = "compression.enabled", defaultValue = "true", description =
+      "Compression enabled. (Just to test boolean flag)", tags =
+      ConfigTag.MANAGEMENT)
+  public void setCompressionEnabled(boolean compressionEnabled) {
+    this.compressionEnabled = compressionEnabled;
   }
 
-  @Config(key = "port")
+  @Config(key = "port", defaultValue = "1234", description = "Port number "
+      + "config (To test in injection)", tags = ConfigTag.MANAGEMENT)
   public void setPort(int port) {
     this.port = port;
   }
 
   @Config(key = "wait", type = ConfigType.TIME, timeUnit =
-      TimeUnit.SECONDS)
+      TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To "
+      + "test TIME config type)", tags = ConfigTag.MANAGEMENT)
   public void setWaitTime(long waitTime) {
     this.waitTime = waitTime;
   }
@@ -69,8 +75,8 @@ public class SimpleConfiguration {
     return bindHost;
   }
 
-  public boolean isEnabled() {
-    return enabled;
+  public boolean isCompressionEnabled() {
+    return compressionEnabled;
   }
 
   public int getPort() {
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
 
b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
similarity index 52%
rename from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
rename to 
hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
index 9c6b213..0edb01a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java
+++ 
b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
@@ -17,18 +17,32 @@
  */
 package org.apache.hadoop.hdds.conf;
 
+import java.io.StringWriter;
+
+import org.junit.Assert;
+import org.junit.Test;
+
 /**
- * Exeception to throw in case of a configuration problem.
+ * Test the utility which loads/writes the config file fragments.
  */
-public class ConfigurationException extends RuntimeException {
-  public ConfigurationException() {
-  }
+public class TestConfigFileAppender {
 
-  public ConfigurationException(String message) {
-    super(message);
-  }
+  @Test
+  public void testInit() {
+    ConfigFileAppender appender = new ConfigFileAppender();
+
+    appender.init();
+
+    appender.addConfig("hadoop.scm.enabled", "true", "desc",
+        new ConfigTag[] {ConfigTag.OZONE, ConfigTag.SECURITY});
+
+    StringWriter builder = new StringWriter();
+    appender.write(builder);
+
+    Assert.assertTrue("Generated config should contain property key entry",
+        builder.toString().contains("<name>hadoop.scm.enabled</name>"));
 
-  public ConfigurationException(String message, Throwable cause) {
-    super(message, cause);
+    Assert.assertTrue("Generated config should contain tags",
+        builder.toString().contains("<tag>OZONE, SECURITY</tag>"));
   }
-}
+}
\ No newline at end of file
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java 
b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
similarity index 78%
rename from 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
rename to 
hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
index 23a8104..e8b310d 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
+++ 
b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -14,21 +14,11 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
+ * <p>
+ * Testing configuration tools.
  */
-package org.apache.hadoop.hdds.conf;
 
 /**
- * Possible type of injected configuration.
- * <p>
- * AUTO means that the exact type will be identified based on the java type of
- * the configuration field.
+ * Testing configuration tools.
  */
-public enum ConfigType {
-  AUTO,
-  STRING,
-  BOOLEAN,
-  INT,
-  LONG,
-  TIME,
-  SIZE
-}
+package org.apache.hadoop.hdds.conf;
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 3e8132f..f77fe9d 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -38,6 +38,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
     <module>server-scm</module>
     <module>tools</module>
     <module>docs</module>
+    <module>config</module>
 
   </modules>
 
@@ -117,6 +118,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-config</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdds-container-service</artifactId>
         <version>${hdds.version}</version>
         <type>test-jar</type>
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 8f62243..e247e96 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.scm.container;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.GeneratedMessage;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.Config;
@@ -40,6 +41,8 @@ import 
org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.Time;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.*;
 import org.apache.ratis.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -54,7 +57,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
@@ -760,14 +762,37 @@ public class ReplicationManager {
      */
     private long eventTimeout = 10 * 60 * 1000;
 
-    @Config(key = "thread.interval", type = ConfigType.TIME, timeUnit =
-        TimeUnit.MILLISECONDS)
+    @Config(key = "thread.interval",
+        type = ConfigType.TIME,
+        defaultValue = "3s",
+        tags = {SCM, OZONE},
+        description = "When a heartbeat from the data node arrives on SCM, "
+            + "it is queued for processing with the time stamp of when the "
+            + "heartbeat arrived. There is a heartbeat processing thread "
+            + "inside "
+            + "SCM that runs at a specified interval. This value controls how "
+            + "frequently this thread is run.\n\n"
+            + "There are some assumptions built into SCM such as this "
+            + "value should allow the heartbeat processing thread to run at "
+            + "least three times more frequently than heartbeats and at least "
+            + "five times more than stale node detection time. "
+            + "If you specify a wrong value, SCM will gracefully refuse to "
+            + "run. "
+            + "For more info look at the node manager tests in SCM.\n"
+            + "\n"
+            + "In short, you don't need to change this."
+    )
     public void setInterval(long interval) {
       this.interval = interval;
     }
 
-    @Config(key = "event.timeout", type = ConfigType.TIME, timeUnit =
-        TimeUnit.MILLISECONDS)
+    @Config(key = "event.timeout",
+        type = ConfigType.TIME,
+        defaultValue = "10m",
+        tags = {SCM, OZONE},
+        description = "Timeout for the container replication/deletion commands 
"
+            + "sent to datanodes. After this timeout the command will be "
+            + "retried.")
     public void setEventTimeout(long eventTimeout) {
       this.eventTimeout = eventTimeout;
     }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
index 00b4684..5da057e 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
@@ -575,6 +575,20 @@ public class TestReplicationManager {
     Assert.assertEquals(0, datanodeCommandHandler.getInvocation());
   }
 
+  @Test
+  public void testGeneratedConfig() {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+
+    ReplicationManagerConfiguration rmc =
+        ozoneConfiguration.getObject(ReplicationManagerConfiguration.class);
+
+    //default is not included in ozone-site.xml but generated from annotation
+    //to the ozone-site-generated.xml which should be loaded by the
+    // OzoneConfiguration.
+    Assert.assertEquals(600000, rmc.getEventTimeout());
+
+  }
+
   @After
   public void teardown() throws IOException {
     containerStateManager.close();
diff --git a/hadoop-ozone/integration-test/src/test/resources/core-site.xml 
b/hadoop-ozone/integration-test/src/test/resources/core-site.xml
new file mode 100644
index 0000000..77dd7ef
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/resources/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>
diff --git a/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml 
b/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml
new file mode 100644
index 0000000..77dd7ef
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to