KNOX-1153 - Service-level overrides for HA provider configuration in generated topologies, and alternate provider config formats.
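
For illustration, a hedged sketch of consuming an alternate-format provider
configuration through the ProviderConfigurationParser introduced below; the
file name and its JSON content are hypothetical, and checked exceptions are
elided:

    // providers.json (hypothetical):
    //   { "providers": [ { "role": "ha", "name": "HaProvider", "enabled": true,
    //                      "params": { "WEBHDFS": "maxFailoverAttempts=3;failoverSleep=1000" } } ] }
    ProviderConfiguration pc = ProviderConfigurationParser.parse("providers.json");
    for (ProviderConfiguration.Provider p : pc.getProviders()) {
        System.out.println(p.getRole() + "/" + p.getName() + " enabled=" + p.isEnabled());
    }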


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/7025086a
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/7025086a
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/7025086a

Branch: refs/heads/master
Commit: 7025086a8a0ab757cb24486b27515f8f5331bfa0
Parents: 0deafca
Author: Phil Zampino <[email protected]>
Authored: Thu Feb 1 18:40:53 2018 -0500
Committer: Phil Zampino <[email protected]>
Committed: Thu Feb 1 18:40:53 2018 -0500

----------------------------------------------------------------------
 .../discovery/ambari/AmbariCluster.java         | 100 ++++-
 .../ambari/AmbariServiceDiscoveryMessages.java  |   4 +
 .../discovery/ambari/WebHdfsUrlCreator.java     |  45 +-
 ...rvice-discovery-zk-config-mapping.properties |  50 +++
 .../discovery/ambari/AmbariClusterTest.java     | 150 +++++++
 .../deploy/HaProviderDeploymentContributor.java |  68 ++-
 .../HaProviderDeploymentContributorTest.java    | 365 ++++++++++++++++
 .../topology/impl/DefaultTopologyService.java   |  15 +-
 .../topology/simple/ProviderConfiguration.java  |  39 ++
 .../simple/ProviderConfigurationParser.java     | 260 ++++++++++++
 .../simple/SimpleDescriptorHandler.java         | 425 ++++++++++++-------
 .../simple/SimpleDescriptorMessages.java        |   9 +-
 .../test/extension/DummyServiceDiscovery.java   |   5 +
 .../PropertiesFileServiceDiscovery.java         |   5 +
 .../simple/ProviderConfigurationParserTest.java | 336 +++++++++++++++
 .../simple/SimpleDescriptorHandlerTest.java     | 199 +++++----
 .../topology/discovery/ServiceDiscovery.java    |  15 +
 17 files changed, 1830 insertions(+), 260 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
index 9d3fa74..2a1db09 100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariCluster.java
@@ -16,22 +16,64 @@
  */
 package org.apache.knox.gateway.topology.discovery.ambari;
 
+import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 
+import java.io.FileInputStream;
+import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 
 class AmbariCluster implements ServiceDiscovery.Cluster {
 
-    private String name = null;
+    private static final AmbariServiceDiscoveryMessages log = MessagesFactory.get(AmbariServiceDiscoveryMessages.class);
+
+    private static final String ZK_CONFIG_MAPPING_FILE = "ambari-service-discovery-zk-config-mapping.properties";
+
+    static final String ZK_CONFIG_MAPPING_SYSTEM_PROPERTY =
+                                                     "org.apache.knox.gateway.topology.discovery.ambari.zk.mapping";
+
+    // Mapping of service roles to Hadoop service configurations and ZooKeeper property names
+    private static final Properties zooKeeperHAConfigMappings = new Properties();
+    static {
+        try {
+            // Load all the default mappings
+            Properties defaults = new Properties();
+            defaults.load(AmbariServiceDiscovery.class.getClassLoader().getResourceAsStream(ZK_CONFIG_MAPPING_FILE));
+            for (String name : defaults.stringPropertyNames()) {
+                zooKeeperHAConfigMappings.setProperty(name, defaults.getProperty(name));
+            }
+
+            // Attempt to apply overriding or additional mappings
+                String overridesPath = System.getProperty(ZK_CONFIG_MAPPING_SYSTEM_PROPERTY);
+            if (overridesPath != null) {
+                Properties overrides = new Properties();
+                InputStream in = new FileInputStream(overridesPath);
+                try {
+                    overrides.load(in);
+                } finally {
+                    in.close();
+                }
+
+                for (String name : overrides.stringPropertyNames()) {
+                    zooKeeperHAConfigMappings.setProperty(name, overrides.getProperty(name));
+                }
+            }
+        } catch (Exception e) {
+            log.failedToLoadZooKeeperConfigurationMapping(e);
+        }
+    }
+
+    private String name;
 
     private ServiceURLFactory urlFactory;
 
     private Map<String, Map<String, ServiceConfiguration>> serviceConfigurations = new HashMap<>();
 
-    private Map<String, AmbariComponent> components = null;
+    private Map<String, AmbariComponent> components;
 
 
     AmbariCluster(String name) {
@@ -91,6 +133,33 @@ class AmbariCluster implements ServiceDiscovery.Cluster {
         return urls;
     }
 
+    @Override
+    public ZooKeeperConfig getZooKeeperConfiguration(String serviceName) {
+        ZooKeeperConfig result = null;
+
+        String config = zooKeeperHAConfigMappings.getProperty(serviceName + ".config");
+        if (config != null) {
+            String[] parts = config.split(":");
+            if (parts.length == 2) {
+                ServiceConfiguration sc = getServiceConfiguration(parts[0], parts[1]);
+                if (sc != null) {
+                    String enabledProp = zooKeeperHAConfigMappings.getProperty(serviceName + ".enabled");
+                    String ensembleProp = zooKeeperHAConfigMappings.getProperty(serviceName + ".ensemble");
+                    String namespaceProp = zooKeeperHAConfigMappings.getProperty(serviceName + ".namespace");
+                    Map<String, String> scProps = sc.getProperties();
+                    if (scProps != null) {
+                        result =
+                            new ZooKeeperConfiguration(enabledProp != null ? scProps.get(enabledProp) : null,
+                                                       ensembleProp != null ? scProps.get(ensembleProp) : null,
+                                                       namespaceProp != null ? scProps.get(namespaceProp) : null);
+                    }
+                }
+            }
+        }
+
+        return result;
+    }
+
 
     static class ServiceConfiguration {
 
@@ -117,4 +186,31 @@ class AmbariCluster implements ServiceDiscovery.Cluster {
         }
     }
 
+
+    static class ZooKeeperConfiguration implements ServiceDiscovery.Cluster.ZooKeeperConfig {
+        boolean isEnabled;
+        String ensemble;
+        String namespace;
+
+        ZooKeeperConfiguration(String enabled, String ensemble, String namespace) {
+            this.namespace = namespace;
+            this.ensemble = ensemble;
+            this.isEnabled = (enabled != null ? Boolean.valueOf(enabled) : true);
+        }
+
+        @Override
+        public boolean isEnabled() {
+            return isEnabled;
+        }
+
+        @Override
+        public String getEnsemble() {
+            return ensemble;
+        }
+
+        @Override
+        public String getNamespace() {
+            return namespace;
+        }
+    }
 }
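
A hedged usage sketch for the new ZooKeeper configuration API above (cluster
construction elided; "HIVE" is just an example service name):

    ServiceDiscovery.Cluster.ZooKeeperConfig zk = cluster.getZooKeeperConfiguration("HIVE");
    if (zk != null && zk.isEnabled()) {
        String ensemble  = zk.getEnsemble();   // e.g., host1:2181,host2:2181,host3:2181
        String namespace = zk.getNamespace();  // e.g., hiveserver2; may be null for some services
    }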

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
index 12e6078..ffd04a9 100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariServiceDiscoveryMessages.java
@@ -49,6 +49,10 @@ public interface AmbariServiceDiscoveryMessages {
     void failedToLoadServiceDiscoveryURLDefConfiguration(@StackTrace(level = MessageLevel.DEBUG) Exception e);
 
     @Message(level = MessageLevel.ERROR,
+             text = "Failed to load ZooKeeper configuration property mappings: {1}")
+    void failedToLoadZooKeeperConfigurationMapping(@StackTrace(level = MessageLevel.DEBUG) Exception e);
+
+    @Message(level = MessageLevel.ERROR,
              text = "Failed to load service discovery URL definition configuration {0}: {1}")
     void failedToLoadServiceDiscoveryURLDefConfiguration(final String configuration,
                                                          @StackTrace(level = MessageLevel.ERROR) Exception e);

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
index 1c65982..9c7d65b 100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/knox/gateway/topology/discovery/ambari/WebHdfsUrlCreator.java
@@ -43,30 +43,31 @@ public class WebHdfsUrlCreator implements ServiceURLCreator {
 
     if (SERVICE.equals(service)) {
       AmbariCluster.ServiceConfiguration sc = cluster.getServiceConfiguration("HDFS", "hdfs-site");
+      if (sc != null) {
+        // First, check if it's HA config
+        String nameServices = null;
+        AmbariComponent nameNodeComp = cluster.getComponent("NAMENODE");
+        if (nameNodeComp != null) {
+          nameServices = nameNodeComp.getConfigProperty("dfs.nameservices");
+        }
 
-      // First, check if it's HA config
-      String nameServices = null;
-      AmbariComponent nameNodeComp = cluster.getComponent("NAMENODE");
-      if (nameNodeComp != null) {
-        nameServices = nameNodeComp.getConfigProperty("dfs.nameservices");
-      }
-
-      if (nameServices != null && !nameServices.isEmpty()) {
-        // If it is an HA configuration
-        Map<String, String> props = sc.getProperties();
-
-        // Name node HTTP addresses are defined as properties of the form:
-        //      dfs.namenode.http-address.<NAMESERVICES>.nn<INDEX>
-        // So, this iterates over the nn<INDEX> properties until there is no such property (since it cannot be known how
-        // many are defined by any other means).
-        int i = 1;
-        String propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
-        while (propertyValue != null) {
-          urls.add(createURL(propertyValue));
-          propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
+        if (nameServices != null && !nameServices.isEmpty()) {
+          // If it is an HA configuration
+          Map<String, String> props = sc.getProperties();
+
+          // Name node HTTP addresses are defined as properties of the form:
+          //      dfs.namenode.http-address.<NAMESERVICES>.nn<INDEX>
+          // So, this iterates over the nn<INDEX> properties until there is no such property (since it cannot be known how
+          // many are defined by any other means).
+          int i = 1;
+          String propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
+          while (propertyValue != null) {
+            urls.add(createURL(propertyValue));
+            propertyValue = getHANameNodeHttpAddress(props, nameServices, i++);
+          }
+        } else { // If it's not an HA configuration, get the single name node HTTP address
+          urls.add(createURL(sc.getProperties().get("dfs.namenode.http-address")));
         }
-      } else { // If it's not an HA configuration, get the single name node HTTP address
-        urls.add(createURL(sc.getProperties().get("dfs.namenode.http-address")));
       }
     }
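
A self-contained sketch of the nn<INDEX> probing described in the comments
above (the property-name pattern is from the code; the nameservice, hosts, and
values are hypothetical):

    Map<String, String> props = new HashMap<>();
    props.put("dfs.namenode.http-address.mycluster.nn1", "host1:50070");
    props.put("dfs.namenode.http-address.mycluster.nn2", "host2:50070");

    List<String> addresses = new ArrayList<>();
    int i = 1;
    String address;
    // Probe nn1, nn2, ... until the first missing index, as in the loop above
    while ((address = props.get("dfs.namenode.http-address.mycluster.nn" + i++)) != null) {
        addresses.add(address);
    }
    // addresses -> [host1:50070, host2:50070]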
 

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-discovery-ambari/src/main/resources/ambari-service-discovery-zk-config-mapping.properties
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/resources/ambari-service-discovery-zk-config-mapping.properties b/gateway-discovery-ambari/src/main/resources/ambari-service-discovery-zk-config-mapping.properties
new file mode 100644
index 0000000..2f5e4ba
--- /dev/null
+++ b/gateway-discovery-ambari/src/main/resources/ambari-service-discovery-zk-config-mapping.properties
@@ -0,0 +1,50 @@
+##########################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+# HIVE
+HIVE.config=HIVE:hive-site
+HIVE.enabled=hive.server2.support.dynamic.service.discovery
+HIVE.ensemble=hive.zookeeper.quorum
+HIVE.namespace=hive.server2.zookeeper.namespace
+
+# HBASE
+WEBHBASE.config=HBASE:hbase-site
+WEBHBASE.ensemble=hbase.zookeeper.quorum
+WEBHBASE.namespace=zookeeper.znode.parent
+
+# KAFKA
+KAFKA.config=KAFKA:kafka-broker
+KAFKA.ensemble=zookeeper.connect
+
+# WEBHDFS
+WEBHDFS.config=HDFS:hdfs-site
+WEBHDFS.ensemble=ha.zookeeper.quorum
+
+# OOZIE
+OOZIE.config=OOZIE:oozie-site
+OOZIE.ensemble=oozie.zookeeper.connection.string
+OOZIE.namespace=oozie.zookeeper.namespace
+
+# YARN
+YARN.config=YARN:yarn-site
+YARN.enabled=yarn.resourcemanager.ha.enabled
+YARN.ensemble=yarn.resourcemanager.zk-address
+
+# WEBHCAT
+WEBHCAT.config=WEBHCAT:webhcat-site
+WEBHCAT.ensemble=templeton.zookeeper.hosts
\ No newline at end of file
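
Each mapping keys a service role to a Hadoop configuration type
(<SERVICE>.config=<AmbariService>:<config-type>), plus optional
<SERVICE>.enabled, <SERVICE>.ensemble, and <SERVICE>.namespace entries naming
the relevant properties in that configuration. A minimal sketch of supplying
overriding or additional mappings via the system property defined in
AmbariCluster; the file path and SOLR entries are hypothetical, and the
property must be set before the mappings are first loaded:

    // /etc/knox/zk-mapping-overrides.properties (hypothetical):
    //   SOLR.config=SOLR:solr-site
    //   SOLR.ensemble=solr.zookeeper.quorum
    System.setProperty("org.apache.knox.gateway.topology.discovery.ambari.zk.mapping",
                       "/etc/knox/zk-mapping-overrides.properties");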

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterTest.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterTest.java b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterTest.java
new file mode 100644
index 0000000..260051b
--- /dev/null
+++ b/gateway-discovery-ambari/src/test/java/org/apache/knox/gateway/topology/discovery/ambari/AmbariClusterTest.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.discovery.ambari;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
+import org.apache.knox.test.TestUtils;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+public class AmbariClusterTest {
+
+  @Test
+  public void testHiveZooKeeperConfiguration() throws Exception {
+
+    final boolean isEnabled = true;
+    final String ensemble = "host1:2181,host2:2181,host3:2181";
+    final String namespace = "hiveserver2";
+
+    Map<String, String> serviceConfigProps = new HashMap<>();
+    serviceConfigProps.put("hive.server2.support.dynamic.service.discovery", String.valueOf(isEnabled));
+    serviceConfigProps.put("hive.zookeeper.quorum", ensemble);
+    serviceConfigProps.put("hive.server2.zookeeper.namespace", namespace);
+
+    AmbariCluster.ZooKeeperConfig config = getZooKeeperConfiguration("HIVE", "hive-site", serviceConfigProps);
+    assertNotNull(config);
+    assertEquals(isEnabled, config.isEnabled());
+    assertEquals(ensemble, config.getEnsemble());
+    assertEquals(namespace, config.getNamespace());
+  }
+
+  @Test
+  public void testWebHBaseZooKeeperConfiguration() throws Exception {
+
+    final boolean isEnabled = true;
+    final String ensemble = "host1:2181,host2:2181,host3:2181";
+    final String namespace = "hbase";
+
+    Map<String, String> serviceConfigProps = new HashMap<>();
+    serviceConfigProps.put("hbase.zookeeper.quorum", ensemble);
+    serviceConfigProps.put("zookeeper.znode.parent", namespace);
+
+    AmbariCluster.ZooKeeperConfig config = getZooKeeperConfiguration("WEBHBASE", "HBASE", "hbase-site", serviceConfigProps);
+    assertNotNull(config);
+    assertEquals(isEnabled, config.isEnabled());
+    assertEquals(ensemble, config.getEnsemble());
+    assertEquals(namespace, config.getNamespace());
+  }
+
+
+  @Test
+  public void testKafkaZooKeeperConfiguration() throws Exception {
+
+    final boolean isEnabled = true;
+    final String ensemble = "host1:2181,host2:2181,host3:2181";
+
+    Map<String, String> serviceConfigProps = new HashMap<>();
+    serviceConfigProps.put("zookeeper.connect", ensemble);
+
+    AmbariCluster.ZooKeeperConfig config = getZooKeeperConfiguration("KAFKA", "kafka-broker", serviceConfigProps);
+    assertNotNull(config);
+    assertEquals(isEnabled, config.isEnabled());
+    assertEquals(ensemble, config.getEnsemble());
+    assertNull(config.getNamespace());
+  }
+
+  @Test
+  public void testWebHDFSZooKeeperConfiguration() throws Exception {
+
+    final boolean isEnabled = true;
+    final String ensemble = "host3:2181,host2:2181,host1:2181";
+
+    Map<String, String> serviceConfigProps = new HashMap<>();
+    serviceConfigProps.put("ha.zookeeper.quorum", ensemble);
+
+    AmbariCluster.ZooKeeperConfig config = getZooKeeperConfiguration("WEBHDFS", "HDFS", "hdfs-site", serviceConfigProps);
+    assertNotNull(config);
+    assertEquals(isEnabled, config.isEnabled());
+    assertEquals(ensemble, config.getEnsemble());
+    assertNull(config.getNamespace());
+  }
+
+
+  @Test
+  public void testOozieZooKeeperConfiguration() throws Exception {
+
+    final boolean isEnabled = true;
+    final String ensemble = "host1:2181,host2:2181,host3:2181";
+    final String namespace = "hiveserver2";
+
+    Map<String, String> serviceConfigProps = new HashMap<>();
+    serviceConfigProps.put("oozie.zookeeper.connection.string", ensemble);
+    serviceConfigProps.put("oozie.zookeeper.namespace", namespace);
+
+    AmbariCluster.ZooKeeperConfig config = getZooKeeperConfiguration("OOZIE", "oozie-site", serviceConfigProps);
+    assertNotNull(config);
+    assertEquals(isEnabled, config.isEnabled());
+    assertEquals(ensemble, config.getEnsemble());
+    assertEquals(namespace, config.getNamespace());
+  }
+
+
+  private ServiceDiscovery.Cluster.ZooKeeperConfig getZooKeeperConfiguration(final String              serviceName,
+                                                                             final String              configType,
+                                                                             final Map<String, String> serviceConfigProps) {
+    return getZooKeeperConfiguration(serviceName, serviceName, configType, serviceConfigProps);
+  }
+
+
+  private ServiceDiscovery.Cluster.ZooKeeperConfig getZooKeeperConfiguration(final String              serviceName,
+                                                                             final String              componentName,
+                                                                             final String              configType,
+                                                                             final Map<String, String> serviceConfigProps) {
+
+    AmbariCluster.ServiceConfiguration sc = EasyMock.createNiceMock(AmbariCluster.ServiceConfiguration.class);
+    EasyMock.expect(sc.getProperties()).andReturn(serviceConfigProps).anyTimes();
+    EasyMock.replay(sc);
+
+    AmbariCluster cluster = new AmbariCluster("test");
+    cluster.addServiceConfiguration(componentName, configType, sc);
+
+    return cluster.getZooKeeperConfiguration(serviceName);
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributor.java
----------------------------------------------------------------------
diff --git 
a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributor.java
 
b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributor.java
index 2f3664f..00c2562 100644
--- 
a/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributor.java
+++ 
b/gateway-provider-ha/src/main/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributor.java
@@ -26,10 +26,12 @@ import org.apache.knox.gateway.ha.provider.HaServiceConfig;
 import org.apache.knox.gateway.ha.provider.HaServletContextListener;
 import org.apache.knox.gateway.ha.provider.impl.HaDescriptorFactory;
 import org.apache.knox.gateway.ha.provider.impl.HaDescriptorManager;
+import org.apache.knox.gateway.ha.provider.impl.HaServiceConfigConstants;
 import org.apache.knox.gateway.ha.provider.impl.i18n.HaMessages;
 import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 import org.apache.knox.gateway.topology.Provider;
 import org.apache.knox.gateway.topology.Service;
+import org.apache.knox.gateway.topology.Topology;
 import org.jboss.shrinkwrap.api.asset.StringAsset;
 
 import java.io.IOException;
@@ -60,10 +62,29 @@ public class HaProviderDeploymentContributor extends ProviderDeploymentContribut
 
    @Override
    public void contributeProvider(DeploymentContext context, Provider provider) {
+      Topology topology = context.getTopology();
       Map<String, String> params = provider.getParams();
       HaDescriptor descriptor = HaDescriptorFactory.createDescriptor();
       for (Entry<String, String> entry : params.entrySet()) {
-         HaServiceConfig config = HaDescriptorFactory.createServiceConfig(entry.getKey(), entry.getValue());
+         String role = entry.getKey();
+         String roleParams = entry.getValue();
+
+         // Create the config based on whatever is specified at the provider level
+         HaServiceConfig config = HaDescriptorFactory.createServiceConfig(role, roleParams);
+
+         // Check for service-level param overrides
+         Map<String, String> serviceLevelParams = null;
+         for (Service s : topology.getServices()) {
+            if (s.getRole().equals(role)) {
+               serviceLevelParams = s.getParams();
+               break;
+            }
+         }
+
+         // Apply any service-level param overrides
+         applyParamOverrides(config, serviceLevelParams);
+
+         // Add the reconciled HA service config to the descriptor
          descriptor.addServiceConfig(config);
       }
       StringWriter writer = new StringWriter();
@@ -79,6 +100,51 @@ public class HaProviderDeploymentContributor extends ProviderDeploymentContribut
       context.addDescriptor(HA_DESCRIPTOR_NAME, descriptor);
    }
 
+   /**
+    * Apply the param values from the specified map to the specified HaServiceConfig.
+    *
+    * @param config             An HaServiceConfig
+    * @param serviceLevelParams Service-level param overrides.
+    */
+   private void applyParamOverrides(HaServiceConfig config, Map<String, String> serviceLevelParams) {
+      if (serviceLevelParams != null && !serviceLevelParams.isEmpty()) {
+         String enabled = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_ENABLED);
+         if (enabled != null) {
+            config.setEnabled(Boolean.valueOf(enabled));
+         }
+
+         String failOverSleep = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_FAILOVER_SLEEP);
+         if (failOverSleep != null) {
+            config.setFailoverSleep(Integer.valueOf(failOverSleep));
+         }
+
+         String failOverAttempts = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_MAX_FAILOVER_ATTEMPTS);
+         if (failOverAttempts != null) {
+            config.setMaxFailoverAttempts(Integer.valueOf(failOverAttempts));
+         }
+
+         String retrySleep = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_RETRY_SLEEP);
+         if (retrySleep != null) {
+            config.setRetrySleep(Integer.valueOf(retrySleep));
+         }
+
+         String retryAttempts = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_MAX_RETRY_ATTEMPTS);
+         if (retryAttempts != null) {
+            config.setMaxRetryAttempts(Integer.valueOf(retryAttempts));
+         }
+
+         String zkEnsemble = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_ZOOKEEPER_ENSEMBLE);
+         if (zkEnsemble != null) {
+            config.setZookeeperEnsemble(zkEnsemble);
+         }
+
+         String zkNamespace = serviceLevelParams.get(HaServiceConfigConstants.CONFIG_PARAM_ZOOKEEPER_NAMESPACE);
+         if (zkNamespace != null) {
+            config.setZookeeperNamespace(zkNamespace);
+         }
+      }
+   }
+
    @Override
    public void finalizeContribution(DeploymentContext context) {
       if (context.getDescriptor(HA_DESCRIPTOR_NAME) != null) {

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributorTest.java
----------------------------------------------------------------------
diff --git a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributorTest.java b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributorTest.java
index f28baaf..6d3bf1e 100644
--- a/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributorTest.java
+++ b/gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/deploy/HaProviderDeploymentContributorTest.java
@@ -17,13 +17,35 @@
  */
 package org.apache.knox.gateway.ha.deploy;
 
+import org.apache.knox.gateway.config.GatewayConfig;
+import org.apache.knox.gateway.deploy.DeploymentContext;
 import org.apache.knox.gateway.deploy.ProviderDeploymentContributor;
+import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
+import org.apache.knox.gateway.descriptor.GatewayDescriptor;
+import org.apache.knox.gateway.descriptor.ResourceDescriptor;
+import org.apache.knox.gateway.ha.provider.HaDescriptor;
+import org.apache.knox.gateway.ha.provider.HaServiceConfig;
+import org.apache.knox.gateway.topology.Provider;
+import org.apache.knox.gateway.topology.Service;
+import org.apache.knox.gateway.topology.Topology;
+import org.easymock.EasyMock;
+import org.jboss.shrinkwrap.api.spec.WebArchive;
+import org.jboss.shrinkwrap.descriptor.api.webapp30.WebAppDescriptor;
 import org.junit.Test;
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.ServiceLoader;
 
 import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 
@@ -43,4 +65,347 @@ public class HaProviderDeploymentContributorTest {
      fail( "Failed to find " + HaProviderDeploymentContributor.class.getName() + " via service loader." );
    }
 
+   /**
+    * Basically, a backward-compatibility test to ensure that HaProvider service params specified ONLY at the provider
+    * level still work.
+    */
+   @Test
+   public void testProviderLevelParams() throws Exception {
+      // Define some provider params
+      Map<String, String> providerParams = new HashMap<>();
+
+      // Specify all the possible params at the HaProvider level for TestRoleOne
+      providerParams.put("TestRoleOne",
+                         "enabled=false;" +
+                         "maxRetryAttempts=5;"+
+                         "retrySleep=50;"+
+                         "maxFailoverAttempts=4;"+
+                         "failoverSleep=40;"+
+                         "zookeeperNamespace=testRoleOne;"+
+                         "zookeeperEnsemble=http://host1:2181,http://host2:2181");
+
+      Provider haProvider = createHaProvider(providerParams);
+
+      // Define the topology content (e.g., services)
+      Collection<Service> topologyServices = new HashSet<>();
+
+      // A service with no param overrides
+      Service testRoleOneService = EasyMock.createNiceMock(Service.class);
+      EasyMock.expect(testRoleOneService.getRole()).andReturn("TestRoleOne").anyTimes();
+      EasyMock.expect(testRoleOneService.getName()).andReturn("TestRoleOneService").anyTimes();
+      EasyMock.expect(testRoleOneService.getParams()).andReturn(Collections.emptyMap()).anyTimes();
+      EasyMock.replay(testRoleOneService);
+      topologyServices.add(testRoleOneService);
+
+      Topology topology = EasyMock.createNiceMock(Topology.class);
+      EasyMock.expect(topology.getServices()).andReturn(topologyServices).anyTimes();
+      EasyMock.replay(topology);
+
+      WebArchive war = EasyMock.createNiceMock(WebArchive.class);
+      EasyMock.replay(war);
+
+      DeploymentContext context = new DescriptorCaptureDeploymentContext(topology, war);
+
+      // Invoke the contributor
+      HaProviderDeploymentContributor haPDC = new HaProviderDeploymentContributor();
+      haPDC.contributeProvider(context, haProvider);
+
+      HaDescriptor descriptor = context.getDescriptor("ha.provider.descriptor");
+      assertNotNull(descriptor);
+      assertEquals(1, descriptor.getServiceConfigs().size());
+
+      validateServiceHaConfig(descriptor.getServiceConfig("TestRoleOne"),
+                              false, 40, 4, 50, 5, "testRoleOne", "http://host1:2181,http://host2:2181");
+   }
+
+   /**
+    * Simple test verifying that HaProvider service params specified ONLY at the service level work.
+    */
+   @Test
+   public void testServiceLevelParamOverrides_NoProviderParams() throws Exception {
+      // Define some provider params
+      Map<String, String> providerParams = new HashMap<>();
+
+      // Register TestRoleOne at the HaProvider level with no provider-level params
+      providerParams.put("TestRoleOne","");
+
+      Provider haProvider = createHaProvider(providerParams);
+
+      // Define the topology content (e.g., services)
+      Collection<Service> topologyServices = new HashSet<>();
+
+      // Specify all the possible params in the TestRoleOne service level
+      Map<String, String> testRoleOneParams = new HashMap<>();
+      testRoleOneParams.put("enabled", "true");
+      testRoleOneParams.put("maxRetryAttempts", "6");
+      testRoleOneParams.put("retrySleep", "60");
+      testRoleOneParams.put("maxFailoverAttempts", "8");
+      testRoleOneParams.put("failoverSleep", "80");
+      testRoleOneParams.put("zookeeperNamespace", "testRoleOneOverride");
+      testRoleOneParams.put("zookeeperEnsemble", "http://host3:2181,http://host4:2181");
+
+      // A service with all the params overridden
+      Service testRoleOneService = EasyMock.createNiceMock(Service.class);
+      EasyMock.expect(testRoleOneService.getRole()).andReturn("TestRoleOne").anyTimes();
+      EasyMock.expect(testRoleOneService.getName()).andReturn("TestRoleOneService").anyTimes();
+      EasyMock.expect(testRoleOneService.getParams()).andReturn(testRoleOneParams).anyTimes();
+      EasyMock.replay(testRoleOneService);
+      topologyServices.add(testRoleOneService);
+
+      Topology topology = EasyMock.createNiceMock(Topology.class);
+      EasyMock.expect(topology.getServices()).andReturn(topologyServices).anyTimes();
+      EasyMock.replay(topology);
+
+      WebArchive war = EasyMock.createNiceMock(WebArchive.class);
+      EasyMock.replay(war);
+
+      DeploymentContext context = new DescriptorCaptureDeploymentContext(topology, war);
+
+      // Invoke the contributor
+      HaProviderDeploymentContributor haPDC = new HaProviderDeploymentContributor();
+      haPDC.contributeProvider(context, haProvider);
+
+      HaDescriptor descriptor = context.getDescriptor("ha.provider.descriptor");
+      assertNotNull(descriptor);
+      assertEquals(1, descriptor.getServiceConfigs().size());
+
+      validateServiceHaConfig(descriptor.getServiceConfig("TestRoleOne"),
+                              true, 80, 8, 60, 6, "testRoleOneOverride", "http://host3:2181,http://host4:2181");
+   }
+
+   /**
+    * Verify a mixture of provider-level params and service-level params.
+    */
+   @Test
+   public void testServiceLevelParamOverrides_SubsetProviderParams() throws Exception {
+      // Define some provider params
+      Map<String, String> providerParams = new HashMap<>();
+
+      // Specify a subset of the possible params at the HaProvider level for TestRoleOne
+      providerParams.put("TestRoleOne",
+                         "enabled=false;" +
+                         "maxRetryAttempts=5;"+
+                         "maxFailoverAttempts=4;"+
+                         "failoverSleep=40");
+
+      Provider haProvider = createHaProvider(providerParams);
+
+      // Define the topology content (e.g., services)
+      Collection<Service> topologyServices = new HashSet<>();
+
+      // Override a subset of the possible params at the TestRoleOne service level
+      Map<String, String> testRoleOneParams = new HashMap<>();
+      testRoleOneParams.put("enabled", "true");
+      testRoleOneParams.put("retrySleep", "60");
+      testRoleOneParams.put("zookeeperNamespace", "testRoleOneOverride");
+      testRoleOneParams.put("zookeeperEnsemble", "http://host3:2181,http://host4:2181");
+
+      // A service with some params overridden
+      Service testRoleOneService = EasyMock.createNiceMock(Service.class);
+      EasyMock.expect(testRoleOneService.getRole()).andReturn("TestRoleOne").anyTimes();
+      EasyMock.expect(testRoleOneService.getName()).andReturn("TestRoleOneService").anyTimes();
+      EasyMock.expect(testRoleOneService.getParams()).andReturn(testRoleOneParams).anyTimes();
+      EasyMock.replay(testRoleOneService);
+      topologyServices.add(testRoleOneService);
+
+      Topology topology = EasyMock.createNiceMock(Topology.class);
+      EasyMock.expect(topology.getServices()).andReturn(topologyServices).anyTimes();
+      EasyMock.replay(topology);
+
+      WebArchive war = EasyMock.createNiceMock(WebArchive.class);
+      EasyMock.replay(war);
+
+      DeploymentContext context = new DescriptorCaptureDeploymentContext(topology, war);
+
+      // Invoke the contributor
+      HaProviderDeploymentContributor haPDC = new HaProviderDeploymentContributor();
+      haPDC.contributeProvider(context, haProvider);
+
+      HaDescriptor descriptor = context.getDescriptor("ha.provider.descriptor");
+      assertNotNull(descriptor);
+      assertEquals(1, descriptor.getServiceConfigs().size());
+
+      validateServiceHaConfig(descriptor.getServiceConfig("TestRoleOne"),
+                              true, 40, 4, 60, 5, "testRoleOneOverride", "http://host3:2181,http://host4:2181");
+   }
+
+
+   @Test
+   public void testServiceLevelParamOverrides_MultipleMixed() throws Exception {
+
+      // Define some provider params
+      Map<String, String> providerParams = new HashMap<>();
+
+      // Specify a subset of the possible HaProvider-level params for TestRoleOne
+      providerParams.put("TestRoleOne",
+                         "enabled=true;maxRetryAttempts=1;retrySleep=10;maxFailoverAttempts=2;failoverSleep=20");
+
+      // Specify all the possible params at the HaProvider level for TestRoleTwo
+      providerParams.put("TestRoleTwo",
+                         "enabled=false;" +
+                         "maxRetryAttempts=3;"+
+                         "retrySleep=30;"+
+                         "maxFailoverAttempts=4;"+
+                         "failoverSleep=40;"+
+                         "zookeeperNamespace=testRoleTwo;"+
+                         "zookeeperEnsemble=http://host1:2181,http://host2:2181");
+
+      Provider testHaProvider = createHaProvider(providerParams);
+
+      // Define the topology content (e.g., services)
+      Collection<Service> topologyServices = new HashSet<>();
+
+      // A service with no param overrides
+      Service testRoleOneService = EasyMock.createNiceMock(Service.class);
+      EasyMock.expect(testRoleOneService.getRole()).andReturn("TestRoleOne").anyTimes();
+      EasyMock.expect(testRoleOneService.getName()).andReturn("TestRoleOneService").anyTimes();
+      EasyMock.expect(testRoleOneService.getParams()).andReturn(Collections.emptyMap()).anyTimes();
+      EasyMock.replay(testRoleOneService);
+      topologyServices.add(testRoleOneService);
+
+      // Override all the possible params in the TestRoleTwo service level
+      Map<String, String> testRoleTwoParams = new HashMap<>();
+      testRoleTwoParams.put("enabled", "true");
+      testRoleTwoParams.put("maxRetryAttempts", "6");
+      testRoleTwoParams.put("retrySleep", "60");
+      testRoleTwoParams.put("maxFailoverAttempts", "8");
+      testRoleTwoParams.put("failoverSleep", "80");
+      testRoleTwoParams.put("zookeeperNamespace", "testRoleTwoOverride");
+      testRoleTwoParams.put("zookeeperEnsemble", "http://host3:2181,http://host4:2181");
+
+      Service testRoleTwoService = EasyMock.createNiceMock(Service.class);
+      EasyMock.expect(testRoleTwoService.getRole()).andReturn("TestRoleTwo").anyTimes();
+      EasyMock.expect(testRoleTwoService.getName()).andReturn("TestRoleTwoService").anyTimes();
+      EasyMock.expect(testRoleTwoService.getParams()).andReturn(testRoleTwoParams).anyTimes();
+      EasyMock.replay(testRoleTwoService);
+      topologyServices.add(testRoleTwoService);
+
+      Topology topology = EasyMock.createNiceMock(Topology.class);
+      EasyMock.expect(topology.getServices()).andReturn(topologyServices).anyTimes();
+      EasyMock.replay(topology);
+
+      WebArchive war = EasyMock.createNiceMock(WebArchive.class);
+      EasyMock.replay(war);
+
+      DeploymentContext context = new DescriptorCaptureDeploymentContext(topology, war);
+
+      // Invoke the contributor
+      HaProviderDeploymentContributor haPDC = new HaProviderDeploymentContributor();
+      haPDC.contributeProvider(context, testHaProvider);
+
+      HaDescriptor descriptor = context.getDescriptor("ha.provider.descriptor");
+      assertNotNull(descriptor);
+      assertEquals(2, descriptor.getServiceConfigs().size());
+
+      // Validate the service with no-overrides, checking that the provider-level defaults are applied
+      validateServiceHaConfig(descriptor.getServiceConfig("TestRoleOne"),
+                              true, 20, 2, 10, 1, null, null);
+
+      // Validate the service with all-overrides, checking that the service-level overrides are applied
+      validateServiceHaConfig(descriptor.getServiceConfig("TestRoleTwo"),
+                              true, 80, 8, 60, 6, "testRoleTwoOverride", "http://host3:2181,http://host4:2181");
+   }
+
+
+   /**
+    *
+    * @param config              The HaServiceConfig to validate
+    * @param isEnabled           The expected enabled param value
+    * @param failoverSleep       The expected failoverSleep param value
+    * @param maxFailoverAttempts The expected maxFailoverAttempts param value
+    * @param retrySleep          The expected retrySleep param value
+    * @param maxRetryAttempts    The expected maxRetryAttempts param value
+    * @param zookeeperNamespace  The expected zookeeperNamespace param value
+    * @param zookeeperEnsemble   The expected zookeeperEnsemble param value
+    */
+   private static void validateServiceHaConfig(HaServiceConfig config,
+                                               boolean         isEnabled,
+                                               int             failoverSleep,
+                                               int             maxFailoverAttempts,
+                                               int             retrySleep,
+                                               int             maxRetryAttempts,
+                                               String          zookeeperNamespace,
+                                               String          zookeeperEnsemble) throws Exception {
+      assertNotNull(config);
+      assertEquals(isEnabled, config.isEnabled());
+      assertEquals(failoverSleep, config.getFailoverSleep());
+      assertEquals(maxFailoverAttempts, config.getMaxFailoverAttempts());
+      assertEquals(retrySleep, config.getRetrySleep());
+      assertEquals(maxRetryAttempts, config.getMaxRetryAttempts());
+
+      if (zookeeperNamespace == null) {
+         assertNull(config.getZookeeperNamespace());
+      } else {
+         assertEquals(zookeeperNamespace, config.getZookeeperNamespace());
+      }
+
+      if (zookeeperEnsemble == null) {
+         assertNull(config.getZookeeperEnsemble());
+      } else {
+         assertEquals(zookeeperEnsemble, config.getZookeeperEnsemble());
+      }
+   }
+
+   private static Provider createHaProvider(Map<String, String> params) {
+      Provider provider = EasyMock.createNiceMock(Provider.class);
+      EasyMock.expect(provider.getRole()).andReturn("ha").anyTimes();
+      EasyMock.expect(provider.getName()).andReturn("HaProvider").anyTimes();
+      EasyMock.expect(provider.getParams()).andReturn(params).anyTimes();
+      EasyMock.replay(provider);
+      return provider;
+   }
+
+
+   private static class DescriptorCaptureDeploymentContext implements DeploymentContext {
+
+      private Topology topology;
+      private WebArchive war;
+      private Map<String, Object> descriptors = new HashMap<>();
+
+      DescriptorCaptureDeploymentContext(Topology topology, WebArchive war) {
+         this.topology = topology;
+         this.war      = war;
+      }
+
+      @Override
+      public GatewayConfig getGatewayConfig() {
+         return null;
+      }
+
+      @Override
+      public Topology getTopology() {
+         return topology;
+      }
+
+      @Override
+      public WebArchive getWebArchive() {
+         return war;
+      }
+
+      @Override
+      public WebAppDescriptor getWebAppDescriptor() {
+         return null;
+      }
+
+      @Override
+      public GatewayDescriptor getGatewayDescriptor() {
+         return null;
+      }
+
+      @Override
+      public void contributeFilter(Service service, ResourceDescriptor resource, String role, String name, List<FilterParamDescriptor> params) {
+
+      }
+
+      @Override
+      public void addDescriptor(String name, Object descriptor) {
+         descriptors.put(name, descriptor);
+      }
+
+      @Override
+      public <T> T getDescriptor(String name) {
+         return (T)descriptors.get(name);
+      }
+
+   }
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
index 543d294..9c19cc8 100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/services/topology/impl/DefaultTopologyService.java
@@ -52,6 +52,7 @@ import org.apache.knox.gateway.topology.builder.TopologyBuilder;
 import org.apache.knox.gateway.topology.discovery.ClusterConfigurationMonitor;
 import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitor;
 import org.apache.knox.gateway.topology.monitor.RemoteConfigurationMonitorFactory;
+import org.apache.knox.gateway.topology.simple.ProviderConfigurationParser;
 import org.apache.knox.gateway.topology.simple.SimpleDescriptor;
 import org.apache.knox.gateway.topology.simple.SimpleDescriptorFactory;
 import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;
@@ -775,10 +776,12 @@ public class DefaultTopologyService
       try {
         // When a simple descriptor has been created or modified, generate the new topology descriptor
         Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
-        log.generatedTopologyForDescriptorChange(result.get("topology").getName(), file.getName());
+        log.generatedTopologyForDescriptorChange(result.get(SimpleDescriptorHandler.RESULT_TOPOLOGY).getName(),
+                                                 file.getName());
 
         // Add the provider config reference relationship for handling updates to the provider config
-        String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
+        String providerConfig =
+                      FilenameUtils.normalize(result.get(SimpleDescriptorHandler.RESULT_REFERENCE).getAbsolutePath());
         if (!providerConfigReferences.containsKey(providerConfig)) {
           providerConfigReferences.put(providerConfig, new ArrayList<String>());
         }
@@ -817,13 +820,9 @@ public class DefaultTopologyService
   /**
    * Change handler for shared provider configurations
    */
-  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor
-          implements FileFilter {
+  public static class SharedProviderConfigMonitor extends FileAlterationListenerAdaptor implements FileFilter {
 
-    static final List<String> SUPPORTED_EXTENSIONS = new ArrayList<>();
-    static {
-      SUPPORTED_EXTENSIONS.add("xml");
-    }
+    static final List<String> SUPPORTED_EXTENSIONS = ProviderConfigurationParser.SUPPORTED_EXTENSIONS;
 
     private DescriptorsMonitor descriptorsMonitor;
     private File descriptorsDir;

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfiguration.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfiguration.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfiguration.java
new file mode 100644
index 0000000..405a8de
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfiguration.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.simple;
+
+
+import java.util.List;
+import java.util.Map;
+
+public interface ProviderConfiguration {
+
+  List<Provider> getProviders();
+
+
+  interface Provider {
+
+    String getRole();
+
+    String getName();
+
+    boolean isEnabled();
+
+    Map<String, String> getParams();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfigurationParser.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfigurationParser.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfigurationParser.java
new file mode 100644
index 0000000..4bfc456
--- /dev/null
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/ProviderConfigurationParser.java
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.knox.gateway.topology.simple;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import org.apache.commons.io.FilenameUtils;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.Unmarshaller;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class ProviderConfigurationParser {
+
+  private static final String EXT_XML  = "xml";
+  private static final String EXT_JSON = "json";
+  private static final String EXT_YML  = "yml";
+  private static final String EXT_YAML = "yaml";
+
+  public static final List<String> SUPPORTED_EXTENSIONS = Arrays.asList(EXT_XML, EXT_JSON, EXT_YML, EXT_YAML);
+
+
+  public static ProviderConfiguration parse(String path) throws Exception {
+    return parse(new File(path));
+  }
+
+  public static ProviderConfiguration parse(File file) throws Exception {
+    ProviderConfiguration providerConfig = null;
+
+    String extension = FilenameUtils.getExtension(file.getName());
+    if (SUPPORTED_EXTENSIONS.contains(extension)) {
+
+      if (isXML(extension)) {
+        providerConfig = parseXML(file);
+      } else if (isJSON(extension)) {
+        providerConfig = parseJSON(file);
+      } else if (isYAML(extension)) {
+        providerConfig = parseYAML(file);
+      }
+    } else {
+      throw new IllegalArgumentException("Unsupported provider configuration format: " + extension);
+    }
+
+    return providerConfig;
+  }
+
+  private static boolean isXML(String extension) {
+    return EXT_XML.equals(extension);
+  }
+
+  private static boolean isJSON(String extension) {
+    return EXT_JSON.equals(extension);
+  }
+
+  private static boolean isYAML(String extension) {
+    return EXT_YAML.equals(extension) || EXT_YML.equals(extension);
+  }
+
+
+  static ProviderConfiguration parseXML(File file) throws Exception {
+    return parseXML(new FileInputStream(file));
+  }
+
+
+  static ProviderConfiguration parseXML(InputStream in) throws Exception {
+    XMLProviderConfiguration providerConfig = null;
+
+    JAXBContext jaxbContext = JAXBContext.newInstance(XMLProviderConfiguration.class);
+    Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
+    providerConfig = (XMLProviderConfiguration) jaxbUnmarshaller.unmarshal(in);
+
+    return providerConfig;
+  }
+
+
+  static ProviderConfiguration parseJSON(File file) throws IOException {
+    return parseJSON(new FileInputStream(file));
+  }
+
+
+  static ProviderConfiguration parseJSON(InputStream in) throws IOException {
+    final ObjectMapper mapper = new ObjectMapper();
+    JSONProviderConfiguration providerConfig = mapper.readValue(in, JSONProviderConfiguration.class);
+    return providerConfig;
+  }
+
+
+  static ProviderConfiguration parseYAML(File file) throws IOException {
+    return parseYAML(new FileInputStream(file));
+  }
+
+  static ProviderConfiguration parseYAML(InputStream in) throws IOException {
+    final ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
+    JSONProviderConfiguration providerConfig = mapper.readValue(in, JSONProviderConfiguration.class);
+    return providerConfig;
+  }
+
+
+  // XML Provider Configuration Model
+  @XmlRootElement(name="gateway")
+  private static class XMLProviderConfiguration implements ProviderConfiguration {
+
+    @XmlElement(name="provider")
+    private List<XMLProvider> providers;
+
+    @Override
+    public List<Provider> getProviders() {
+      List<Provider> plist = new ArrayList<>();
+      if (providers != null) {
+        plist.addAll(providers);
+      }
+      return plist;
+    }
+
+    @XmlAccessorType(XmlAccessType.NONE)
+    private static class XMLProvider implements ProviderConfiguration.Provider {
+      @XmlElement
+      private String role;
+
+      @XmlElement
+      private String name;
+
+      @XmlElement
+      private boolean enabled;
+
+      @XmlElement(name = "param")
+      private List<XMLParam> params;
+
+      @Override
+      public String getRole() {
+        return role;
+      }
+
+      @Override
+      public String getName() {
+        return name;
+      }
+
+      @Override
+      public boolean isEnabled() {
+        return enabled;
+      }
+
+      @Override
+      public Map<String, String> getParams() {
+        Map<String, String> result = new HashMap<>();
+        if (params != null) {
+          for (XMLParam p : params) {
+            result.put(p.name, p.value);
+          }
+        }
+        return result;
+      }
+
+      @XmlAccessorType(XmlAccessType.NONE)
+      private static class XMLParam {
+        @XmlElement
+        private String name;
+
+        @XmlElement
+        private String value;
+
+        String getName() {
+          return name;
+        }
+
+        String getValue() {
+          return value;
+        }
+      }
+    }
+
+  }
+
+
+  // JSON/YAML Provider Configuration Model
+  private static class JSONProviderConfiguration implements ProviderConfiguration {
+
+    @JsonProperty("providers")
+    private List<JSONProvider> providers;
+
+    @Override
+    public List<Provider> getProviders() {
+      List<Provider> plist = new ArrayList<>();
+      if (providers != null) {
+        plist.addAll(providers);
+      }
+      return plist;
+    }
+
+    private static class JSONProvider implements ProviderConfiguration.Provider {
+
+      @JsonProperty("role")
+      private String role;
+
+      @JsonProperty("name")
+      private String name;
+
+      @JsonProperty("enabled")
+      private boolean enabled;
+
+      @JsonProperty("params")
+      private Map<String, String> params;
+
+      @Override
+      public String getRole() {
+        return role;
+      }
+
+      @Override
+      public String getName() {
+        return name;
+      }
+
+      @Override
+      public boolean isEnabled() {
+        return enabled;
+      }
+
+      @Override
+      public Map<String, String> getParams() {
+        Map<String, String> result = new HashMap<>();
+        if (params != null) {
+          result.putAll(params);
+        }
+        return result;
+      }
+    }
+
+  }
+
+
+}
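
For reference, each supported file extension maps onto the same provider
configuration model. Minimal sketches of equivalent content in the three
formats follow (the role, name, and param values are illustrative only, but
the shapes match the XMLProviderConfiguration and JSONProviderConfiguration
classes above):

XML (.xml):

    <gateway>
        <provider>
            <role>ha</role>
            <name>HaProvider</name>
            <enabled>true</enabled>
            <param>
                <name>WEBHDFS</name>
                <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
            </param>
        </provider>
    </gateway>

YAML (.yml/.yaml):

    providers:
      - role: ha
        name: HaProvider
        enabled: true
        params:
          WEBHDFS: "maxFailoverAttempts=3;failoverSleep=1000;enabled=true"

JSON (.json):

    {
      "providers": [
        {
          "role": "ha",
          "name": "HaProvider",
          "enabled": true,
          "params": {
            "WEBHDFS": "maxFailoverAttempts=3;failoverSleep=1000;enabled=true"
          }
        }
      ]
    }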

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
index 30786dc..55411d8 100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
@@ -18,11 +18,10 @@ package org.apache.knox.gateway.topology.simple;
 
 import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileWriter;
-import java.io.InputStreamReader;
 import java.io.IOException;
 
+import java.io.StringWriter;
 import java.net.URI;
 import java.net.URISyntaxException;
 
@@ -31,6 +30,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.knox.gateway.GatewayServer;
 import org.apache.knox.gateway.i18n.messages.MessagesFactory;
@@ -50,6 +50,28 @@ import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
  */
 public class SimpleDescriptorHandler {
 
+    /**
+     * The name of the property in the result Map for the topology file.
+     */
+    public static final String RESULT_TOPOLOGY  = "topology";
+
+    /**
+     * The name of the property in the result Map for the provider configuration file applied to the generated topology.
+     */
+    public static final String RESULT_REFERENCE = "reference";
+
+    private static final String DEFAULT_DISCOVERY_TYPE = "AMBARI";
+
+    private static final String[] PROVIDER_CONFIG_FILE_EXTENSIONS;
+    static {
+
+        PROVIDER_CONFIG_FILE_EXTENSIONS = new String[ProviderConfigurationParser.SUPPORTED_EXTENSIONS.size()];
+        int i = 0;
+        for (String ext : ProviderConfigurationParser.SUPPORTED_EXTENSIONS) {
+            PROVIDER_CONFIG_FILE_EXTENSIONS[i++] = "." + ext;
+        }
+    }
+
     private static final Service[] NO_GATEWAY_SERVICES = new Service[]{};
 
     private static final SimpleDescriptorMessages log = MessagesFactory.get(SimpleDescriptorMessages.class);
@@ -77,33 +99,13 @@ public class SimpleDescriptorHandler {
     }
 
     public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory, Service...gatewayServices) {
-        Map<String, File> result = new HashMap<>();
-
-        File topologyDescriptor;
-
-        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
-        sdc.setUser(desc.getDiscoveryUser());
-        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
-
-        // Use the discovery type from the descriptor. If it's unspecified, employ the default type.
-        String discoveryType = desc.getDiscoveryType();
-        if (discoveryType == null) {
-            discoveryType = "AMBARI";
-        }
 
-        // Use the cached discovery object for the required type, if it has already been loaded
-        ServiceDiscovery sd = discoveryInstances.get(discoveryType);
-        if (sd == null) {
-            sd = ServiceDiscoveryFactory.get(discoveryType, gatewayServices);
-            discoveryInstances.put(discoveryType, sd);
-        }
-        ServiceDiscovery.Cluster cluster = sd.discover(sdc, desc.getClusterName());
-
-        List<String> validServiceNames = new ArrayList<>();
-
-        Map<String, Map<String, String>> serviceParams = new HashMap<>();
-        Map<String, List<String>>        serviceURLs   = new HashMap<>();
+        List<String>                     validServiceNames = new ArrayList<>();
+        Map<String, Map<String, String>> serviceParams     = new HashMap<>();
+        Map<String, List<String>>        serviceURLs       = new HashMap<>();
 
+        // Discover the cluster details required by the descriptor
+        ServiceDiscovery.Cluster cluster = performDiscovery(desc, gatewayServices);
         if (cluster != null) {
             for (SimpleDescriptor.Service descService : desc.getServices()) {
                 String serviceName = descService.getName();
@@ -155,133 +157,53 @@ public class SimpleDescriptorHandler {
             log.unableCreatePasswordForEncryption(desc.getName());
         }
 
-        BufferedWriter fw = null;
-        topologyDescriptor = null;
-        File providerConfig;
-        try {
-            // Verify that the referenced provider configuration exists before attempting to reading it
-            providerConfig = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
-            if (providerConfig == null) {
-                log.failedToResolveProviderConfigRef(desc.getProviderConfig());
-                throw new IllegalArgumentException("Unresolved provider 
configuration reference: " +
-                                                   desc.getProviderConfig() + 
" ; Topology update aborted!");
-            }
-            result.put("reference", providerConfig);
-
-            // TODO: Should the contents of the provider config be validated before incorporating it into the topology?
-
-            String topologyFilename = desc.getName();
-            if (topologyFilename == null) {
-                topologyFilename = desc.getClusterName();
-            }
-            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
-
-            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
-
-            fw.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
-
-            fw.write("<!--==============================================-->\n");
-            fw.write("<!-- DO NOT EDIT. This is an auto-generated file. -->\n");
-            fw.write("<!--==============================================-->\n");
-
-            fw.write("<topology>\n");
-
-            // KNOX-1105 Indicate that this topology was auto-generated
-            fw.write("    <generated>true</generated>\n");
-
-            // Copy the externalized provider configuration content into the topology descriptor in-line
-            InputStreamReader policyReader = new InputStreamReader(new FileInputStream(providerConfig));
-            char[] buffer = new char[1024];
-            int count;
-            while ((count = policyReader.read(buffer)) > 0) {
-                fw.write(buffer, 0, count);
-            }
-            policyReader.close();
+        // Generate the topology file
+        return generateTopology(desc, srcDirectory, destDirectory, cluster, 
validServiceNames, serviceURLs, serviceParams);
+    }
 
-            // Services
-            // Sort the service names to write the services alphabetically
-            List<String> serviceNames = new ArrayList<>(validServiceNames);
-            Collections.sort(serviceNames);
 
-            // Write the service declarations
-            for (String serviceName : serviceNames) {
-                fw.write("\n");
-                fw.write("    <service>\n");
-                fw.write("        <role>" + serviceName + "</role>\n");
-
-                // URLs
-                List<String> urls = serviceURLs.get(serviceName);
-                if (urls != null) {
-                    for (String url : urls) {
-                        fw.write("        <url>" + url + "</url>\n");
-                    }
-                }
-
-                // Params
-                Map<String, String> svcParams = serviceParams.get(serviceName);
-                if (svcParams != null) {
-                    for (String paramName : svcParams.keySet()) {
-                        fw.write("        <param>\n");
-                        fw.write("            <name>" + paramName + 
"</name>\n");
-                        fw.write("            <value>" + 
svcParams.get(paramName) + "</value>\n");
-                        fw.write("        </param>\n");
-                    }
-                }
+    private static ServiceDiscovery.Cluster performDiscovery(SimpleDescriptor desc, Service...gatewayServices) {
+        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
+        sdc.setUser(desc.getDiscoveryUser());
+        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
 
-                fw.write("    </service>\n");
-            }
+        // Use the discovery type from the descriptor. If it's unspecified, employ the default type.
+        String discoveryType = desc.getDiscoveryType();
+        if (discoveryType == null) {
+            discoveryType = DEFAULT_DISCOVERY_TYPE;
+        }
 
-            // Applications
-            List<SimpleDescriptor.Application> apps = desc.getApplications();
-            if (apps != null) {
-                for (SimpleDescriptor.Application app : apps) {
-                    fw.write("    <application>\n");
-                    fw.write("        <name>" + app.getName() + "</name>\n");
+        // Use the cached discovery object for the required type, if it has already been loaded
+        ServiceDiscovery sd = discoveryInstances.get(discoveryType);
+        if (sd == null) {
+            sd = ServiceDiscoveryFactory.get(discoveryType, gatewayServices);
+            discoveryInstances.put(discoveryType, sd);
+        }
 
-                    // URLs
-                    List<String> urls = app.getURLs();
-                    if (urls != null) {
-                        for (String url : urls) {
-                            fw.write("        <url>" + url + "</url>\n");
-                        }
-                    }
+        return sd.discover(sdc, desc.getClusterName());
+    }
 
-                    // Params
-                    Map<String, String> appParams = app.getParams();
-                    if (appParams != null) {
-                        for (String paramName : appParams.keySet()) {
-                            fw.write("        <param>\n");
-                            fw.write("            <name>" + paramName + 
"</name>\n");
-                            fw.write("            <value>" + 
appParams.get(paramName) + "</value>\n");
-                            fw.write("        </param>\n");
-                        }
-                    }
 
-                    fw.write("    </application>\n");
-                }
-            }
+    private static ProviderConfiguration handleProviderConfiguration(SimpleDescriptor desc, File providerConfig) {
+        // Verify that the referenced provider configuration exists before attempting to read it
+        if (providerConfig == null) {
+            log.failedToResolveProviderConfigRef(desc.getProviderConfig());
+            throw new IllegalArgumentException("Unresolved provider 
configuration reference: " +
+                                               desc.getProviderConfig() + " ; 
Topology update aborted!");
+        }
 
-            fw.write("</topology>\n");
+        // Parse the contents of the referenced provider config
+        ProviderConfiguration parsedConfig = null;
 
-            fw.flush();
-        } catch (IOException e) {
-            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
-            topologyDescriptor.delete();
-        } finally {
-            if (fw != null) {
-                try {
-                    fw.close();
-                } catch (IOException e) {
-                    // ignore
-                }
-            }
+        try {
+            parsedConfig = ProviderConfigurationParser.parse(providerConfig);
+        } catch (Exception e) {
+            log.failedToParseProviderConfig(providerConfig.getAbsolutePath(), e);
         }
 
-        result.put("topology", topologyDescriptor);
-        return result;
+        return parsedConfig;
     }
 
-
     /**
      * KNOX-1136
      *
@@ -292,7 +214,7 @@ public class SimpleDescriptorHandler {
      *
      * @return true if the credential was successfully provisioned; otherwise, false.
      */
-    private static boolean provisionQueryParamEncryptionCredential(String topologyName) {
+    private static boolean provisionQueryParamEncryptionCredential(final String topologyName) {
         boolean result = false;
 
         try {
@@ -327,7 +249,7 @@ public class SimpleDescriptorHandler {
     }
 
 
-    private static boolean validateURL(String serviceName, String url) {
+    private static boolean validateURL(final String serviceName, final String url) {
         boolean result = false;
 
         if (url != null && !url.isEmpty()) {
@@ -343,7 +265,7 @@ public class SimpleDescriptorHandler {
     }
 
 
-    private static File resolveProviderConfigurationReference(String reference, File srcDirectory) {
+    private static File resolveProviderConfigurationReference(final String reference, final File srcDirectory) {
         File providerConfig;
 
         // If the reference includes a path
@@ -364,11 +286,15 @@ public class SimpleDescriptorHandler {
                 // Check the shared-providers config location
                 File sharedProvidersDir = new File(srcDirectory, "../shared-providers");
                 if (sharedProvidersDir.exists()) {
+                    // Check if it's a valid name without the extension
                     providerConfig = new File(sharedProvidersDir, reference);
                     if (!providerConfig.exists()) {
-                        // Check if it's a valid name without the extension
-                        providerConfig = new File(sharedProvidersDir, reference + ".xml");
-                        if (!providerConfig.exists()) {
+                        // Check the supported file extensions to see if the reference can be resolved
+                        for (String ext : PROVIDER_CONFIG_FILE_EXTENSIONS) {
+                            providerConfig = new File(sharedProvidersDir, reference + ext);
+                            if (providerConfig.exists()) {
+                                break;
+                            }
                             providerConfig = null;
                         }
                     }
@@ -379,4 +305,209 @@ public class SimpleDescriptorHandler {
         return providerConfig;
     }
 
+
+    /**
+     * Generate a topology file, driven by the specified simple descriptor.
+     *
+     * @param desc              The simple descriptor driving the topology generation.
+     * @param srcDirectory      The source directory of the simple descriptor.
+     * @param destDirectory     The destination directory for the generated topology file.
+     * @param cluster           The discovery details for the referenced cluster.
+     * @param validServiceNames The validated service names.
+     * @param serviceURLs       The URLs associated with the valid service names.
+     * @param serviceParams     The params associated with the valid service names.
+     *
+     * @return A Map with the generated topology file and the referenced provider configuration.
+     */
+    private static Map<String, File> generateTopology(final SimpleDescriptor desc,
+                                                      final File srcDirectory,
+                                                      final File destDirectory,
+                                                      final ServiceDiscovery.Cluster cluster,
+                                                      final List<String> validServiceNames,
+                                                      final Map<String, List<String>> serviceURLs,
+                                                      final Map<String, Map<String, String>> serviceParams) {
+        Map<String, File> result = new HashMap<>();
+
+        BufferedWriter fw = null;
+        File topologyDescriptor = null;
+        try {
+            // Resolve and parse the referenced provider configuration
+            File providerConfigFile = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
+            ProviderConfiguration providerConfiguration = handleProviderConfiguration(desc, providerConfigFile);
+            if (providerConfiguration == null) {
+                throw new IllegalArgumentException("Invalid provider configuration.");
+            }
+            result.put(RESULT_REFERENCE, providerConfigFile);
+
+            ProviderConfiguration.Provider haProvider = null;
+            for (ProviderConfiguration.Provider provider : providerConfiguration.getProviders()) {
+                if ("ha".equals(provider.getRole())) {
+                    haProvider = provider;
+                    break;
+                }
+            }
+
+            // Collect HA-related service parameters
+            Map<String, ServiceDiscovery.Cluster.ZooKeeperConfig> haServiceParams = new HashMap<>();
+            if (cluster != null) {
+                if (haProvider != null) {
+                    // Collect the roles declared by the HaProvider
+                    Map<String, String> haProviderParams = haProvider.getParams();
+                    if (haProviderParams != null) {
+                        Set<String> haProviderRoles = haProviderParams.keySet();
+                        for (String haProviderRole : haProviderRoles) {
+                            // For each role declared by the HaProvider that supports ZooKeeper,
+                            // try to get the ZK ensemble and namespace from the cluster.
+                            ServiceDiscovery.Cluster.ZooKeeperConfig zkConfig =
+                                cluster.getZooKeeperConfiguration(haProviderRole);
+                            if (zkConfig != null) {
+                                haServiceParams.put(haProviderRole, zkConfig);
+                            }
+                        }
+                    }
+                }
+            }
+
+            // Generate the topology content
+            StringWriter sw = new StringWriter();
+
+            sw.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
+            sw.write("<!--==============================================-->\n");
+            sw.write("<!-- DO NOT EDIT. This is an auto-generated file. -->\n");
+            sw.write("<!--==============================================-->\n");
+
+            sw.write("<topology>\n");
+
+            // KNOX-1105 Indicate that this topology was auto-generated
+            sw.write("    <generated>true</generated>\n");
+
+            // Incorporate the externalized provider configuration content into the topology descriptor
+            sw.write("    <gateway>\n");
+            for (ProviderConfiguration.Provider provider : providerConfiguration.getProviders()) {
+                sw.write("        <provider>\n");
+                sw.write("            <role>" + provider.getRole() + 
"</role>\n");
+                sw.write("            <name>" + provider.getName() + 
"</name>\n");
+                sw.write("            <enabled>" + provider.isEnabled() + 
"</enabled>\n");
+
+                for (Map.Entry<String, String> param : provider.getParams().entrySet()) {
+                    sw.write("            <param>\n");
+                    sw.write("                <name>" + param.getKey() + 
"</name>\n");
+                    sw.write("                <value>" + param.getValue() + 
"</value>\n");
+                    sw.write("            </param>\n");
+                }
+
+                sw.write("        </provider>\n");
+            }
+            sw.write("    </gateway>\n");
+
+            // Services
+            // Sort the service names to write the services alphabetically
+            List<String> serviceNames = new ArrayList<>(validServiceNames);
+            Collections.sort(serviceNames);
+
+            // Write the service declarations
+            for (String serviceName : serviceNames) {
+                sw.write("\n");
+                sw.write("    <service>\n");
+                sw.write("        <role>" + serviceName + "</role>\n");
+
+                // If the service is configured for ZooKeeper-based HA
+                ServiceDiscovery.Cluster.ZooKeeperConfig zkConf = haServiceParams.get(serviceName);
+                if (zkConf != null && zkConf.isEnabled() && zkConf.getEnsemble() != null) {
+                    // Add the zookeeper params to the map for serialization
+                    Map<String,String> params = serviceParams.computeIfAbsent(serviceName, k -> new HashMap<>());
+
+                    String ensemble = zkConf.getEnsemble();
+                    if (ensemble != null) {
+                        params.put("zookeeperEnsemble", ensemble);
+                    }
+
+                    String namespace = zkConf.getNamespace();
+                    if (namespace != null) {
+                        params.put("zookeeperNamespace", namespace);
+                    }
+                } else {
+                    // Serialize the service URLs
+                    List<String> urls = serviceURLs.get(serviceName);
+                    if (urls != null) {
+                        for (String url : urls) {
+                            sw.write("        <url>" + url + "</url>\n");
+                        }
+                    }
+                }
+
+                // Params
+                Map<String, String> svcParams = serviceParams.get(serviceName);
+                if (svcParams != null) {
+                    for (String paramName : svcParams.keySet()) {
+                        sw.write("        <param>\n");
+                        sw.write("            <name>" + paramName + 
"</name>\n");
+                        sw.write("            <value>" + 
svcParams.get(paramName) + "</value>\n");
+                        sw.write("        </param>\n");
+                    }
+                }
+
+                sw.write("    </service>\n");
+            }
+
+            // Applications
+            List<SimpleDescriptor.Application> apps = desc.getApplications();
+            if (apps != null) {
+                for (SimpleDescriptor.Application app : apps) {
+                    sw.write("    <application>\n");
+                    sw.write("        <name>" + app.getName() + "</name>\n");
+
+                    // URLs
+                    List<String> urls = app.getURLs();
+                    if (urls != null) {
+                        for (String url : urls) {
+                            sw.write("        <url>" + url + "</url>\n");
+                        }
+                    }
+
+                    // Params
+                    Map<String, String> appParams = app.getParams();
+                    if (appParams != null) {
+                        for (String paramName : appParams.keySet()) {
+                            sw.write("        <param>\n");
+                            sw.write("            <name>" + paramName + 
"</name>\n");
+                            sw.write("            <value>" + 
appParams.get(paramName) + "</value>\n");
+                            sw.write("        </param>\n");
+                        }
+                    }
+
+                    sw.write("    </application>\n");
+                }
+            }
+
+            sw.write("</topology>\n");
+
+            // Write the generated content to a file
+            String topologyFilename = desc.getName();
+            if (topologyFilename == null) {
+                topologyFilename = desc.getClusterName();
+            }
+            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
+
+            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
+            fw.write(sw.toString());
+            fw.flush();
+        } catch (IOException e) {
+            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
+            topologyDescriptor.delete();
+        } finally {
+            if (fw != null) {
+                try {
+                    fw.close();
+                } catch (IOException e) {
+                    // ignore
+                }
+            }
+        }
+
+        result.put(RESULT_TOPOLOGY, topologyDescriptor);
+
+        return result;
+    }
+
 }
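
To illustrate the effect of the ZooKeeper-HA handling above: for a service role
that the HaProvider declares and for which discovery returns a ZooKeeper
configuration, the generated topology now carries zookeeperEnsemble and
zookeeperNamespace params instead of explicit service URLs. A sketch of the
relevant fragment of the generated output (the hostnames and namespace value
below are hypothetical):

    <service>
        <role>HIVE</role>
        <param>
            <name>zookeeperEnsemble</name>
            <value>host1:2181,host2:2181,host3:2181</value>
        </param>
        <param>
            <name>zookeeperNamespace</name>
            <value>hiveserver2</value>
        </param>
    </service>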

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
index 28962f9..869c27b 100644
--- a/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
+++ b/gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorMessages.java
@@ -37,15 +37,20 @@ public interface SimpleDescriptorMessages {
     void failedToResolveProviderConfigRef(final String providerConfigRef);
 
     @Message(level = MessageLevel.ERROR,
+        text = "Failed to parse the referenced provider configuration {0}: 
{1}")
+    void failedToParseProviderConfig(final String providerConfigRef,
+                                     @StackTrace( level = MessageLevel.DEBUG ) 
Exception e);
+
+    @Message(level = MessageLevel.ERROR,
             text = "URL validation failed for {0} URL {1} : {2}")
     void serviceURLValidationFailed(final String serviceName,
                                     final String url,
-                                    @StackTrace( level = MessageLevel.DEBUG ) Exception e );
+                                    @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 
     @Message(level = MessageLevel.ERROR,
             text = "Error generating topology {0} from simple descriptor: {1}")
     void failedToGenerateTopologyFromSimpleDescriptor(final String topologyFile,
-                                                      @StackTrace( level = MessageLevel.DEBUG ) Exception e );
+                                                      @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 
     @Message( level = MessageLevel.ERROR,
               text = "Error creating a password for query string encryption 
for {0}: {1}" )

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/DummyServiceDiscovery.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/DummyServiceDiscovery.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/DummyServiceDiscovery.java
index 1758d25..21883d9 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/DummyServiceDiscovery.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/DummyServiceDiscovery.java
@@ -42,6 +42,11 @@ public class DummyServiceDiscovery implements ServiceDiscovery {
         public List<String> getServiceURLs(String serviceName) {
             return Collections.singletonList("http://servicehost:9999/dummy");
         }
+
+        @Override
+        public ZooKeeperConfig getZooKeeperConfiguration(String serviceName) {
+            return null;
+        }
     };
 
     private static final Map<String, Cluster> CLUSTERS = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/knox/blob/7025086a/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/PropertiesFileServiceDiscovery.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/PropertiesFileServiceDiscovery.java b/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/PropertiesFileServiceDiscovery.java
index bd3823f..45307f1 100644
--- a/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/PropertiesFileServiceDiscovery.java
+++ b/gateway-server/src/test/java/org/apache/knox/gateway/topology/discovery/test/extension/PropertiesFileServiceDiscovery.java
@@ -103,6 +103,11 @@ class PropertiesFileServiceDiscovery implements ServiceDiscovery {
         public List<String> getServiceURLs(String serviceName) {
             return serviceURLS.get(serviceName);
         }
+
+        @Override
+        public ZooKeeperConfig getZooKeeperConfiguration(String serviceName) {
+            return null; // TODO: PJZ: Implement me
+        }
     }
 
 }
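
The null return above is the honest placeholder for the TODO. If this test
discovery were to honor ZooKeeper lookups, a minimal sketch might look like
the following; it assumes a java.util.Properties field named props and the
per-service key naming shown in the comments (both hypothetical), and relies
only on the three ZooKeeperConfig accessors that SimpleDescriptorHandler
actually exercises (isEnabled, getEnsemble, getNamespace):

    @Override
    public ZooKeeperConfig getZooKeeperConfiguration(String serviceName) {
        // Hypothetical keys: "<serviceName>.zk.ensemble" and "<serviceName>.zk.namespace"
        final String ensemble  = props.getProperty(serviceName + ".zk.ensemble");
        final String namespace = props.getProperty(serviceName + ".zk.namespace");
        if (ensemble == null) {
            return null; // No ZooKeeper details defined for this service
        }
        return new ZooKeeperConfig() {
            @Override
            public boolean isEnabled() { return true; }

            @Override
            public String getEnsemble() { return ensemble; }

            @Override
            public String getNamespace() { return namespace; }
        };
    }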
