AMBARI-21620. Set the needed druid/hive properties via ambari (Slim Bouguerra via Swapan Shridhar).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/79ff23fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/79ff23fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/79ff23fa

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 79ff23fac2aa08f151781565a59fcbf80061b9f0
Parents: e99f8e8
Author: Swapan Shridhar <sshrid...@hortonworks.com>
Authored: Fri Aug 18 15:06:29 2017 -0700
Committer: Swapan Shridhar <sshrid...@hortonworks.com>
Committed: Fri Aug 18 15:06:29 2017 -0700

----------------------------------------------------------------------
 .../DRUID/0.9.2/package/scripts/druid.py        |  26 ++-
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml | 218 +++++++++++++++++++
 .../HIVE/0.12.0.2.0/metainfo.xml                |   1 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |  11 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  49 ++++-
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |  14 ++
 .../test/python/stacks/2.6/DRUID/test_druid.py  |   8 +-
 .../stacks/2.6/common/test_stack_advisor.py     |  60 ++++-
 8 files changed, 371 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
index fa52247..ec98c3c 100644
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
@@ -161,24 +161,25 @@ def ensure_hadoop_directories():
                         type="directory",
                         action="create_on_execute",
                         owner=params.druid_user,
+                        group='hadoop',
                         recursive_chown=True,
                         recursive_chmod=True
                         )
 
-    # create the segment storage dir
-    create_hadoop_directory(storage_dir)
+    # create the segment storage dir, users like hive from group hadoop need to write to this directory
+    create_hadoop_directory(storage_dir, mode=0775)
 
   # Create HadoopIndexTask hadoopWorkingPath
  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
   if hadoop_working_path is not None:
     if hadoop_working_path.startswith(params.hdfs_tmp_dir):
-      params.HdfsResource(params.hdfs_tmp_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           owner=params.hdfs_user,
-                           mode=0777,
-      )
-    create_hadoop_directory(hadoop_working_path)
+        params.HdfsResource(params.hdfs_tmp_dir,
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.hdfs_user,
+                            mode=0777,
+                            )
+    create_hadoop_directory(hadoop_working_path, mode=0775)
 
   # If HDFS is used for storing logs, create Index Task log directory
   indexer_logs_type = druid_common_config['druid.indexer.logs.type']
@@ -187,15 +188,16 @@ def ensure_hadoop_directories():
     create_hadoop_directory(indexer_logs_directory)
 
 
-def create_hadoop_directory(hadoop_dir):
+def create_hadoop_directory(hadoop_dir, mode=0755):
   import params
   params.HdfsResource(hadoop_dir,
                       type="directory",
                       action="create_on_execute",
                       owner=params.druid_user,
-                      mode=0755
+                      group='hadoop',
+                      mode=mode
                       )
-  Logger.info(format("Created Hadoop Directory [{hadoop_dir}]"))
+  Logger.info(format("Created Hadoop Directory [{hadoop_dir}], with mode [{mode}]"))
 
 
 def ensure_base_directories():

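A note on the permission change above: create_hadoop_directory now defaults to
0755, and callers request a group-writable 0775 where hive (group hadoop) must
write. A minimal local-filesystem sketch of that pattern, using plain os calls
rather than Ambari's HdfsResource API, with a made-up path:

  import os

  def create_directory(path, mode=0o755):
      # mirror create_on_execute semantics: create only if missing
      if not os.path.isdir(path):
          os.makedirs(path)
      os.chmod(path, mode)  # 0o775 adds group write for the hadoop group
      print("Created directory [%s] with mode [%o]" % (path, mode))

  # hypothetical segment dir, group-writable so hive can publish segments
  create_directory("/tmp/druid-segments-demo", mode=0o775)
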
http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
index ab5c279..d66cf4c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -450,6 +450,224 @@ limitations under the License.
     </depends-on>
     <on-ambari-upgrade add="false"/>
   </property>
+
+  <!-- Druid related properties -->
+  <property>
+    <name>hive.druid.broker.address.default</name>
+    <value>localhost:8082</value>
+    <description>Host name of druid router if any or broker</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-router</type>
+        <name>druid.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.metadata.uri</name>
+    <value>jdbc:mysql://localhost:3355/druid</value>
+    <description>URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.connectURI</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.coordinator.address.default</name>
+    <value>localhost:8082</value>
+    <description>Host name of druid router if any or broker</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-coordinator</type>
+        <name>druid.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.metadata.password</name>
+    <value>{{druid_metadata_password}}</value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Druid Metadata Password</display-name>
+    <description>Druid meta data storage password</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+   <property>
+    <name>hive.druid.metadata.username</name>
+    <value>druid</value>
+    <description>Username used to connect to druid metadata storage</description>
+    <on-ambari-upgrade add="false"/>
+     <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.user</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.druid.indexer.segments.granularity</name>
+    <display-name>Default Granularity for the Druid segments</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>YEAR</value>
+        </entry>
+        <entry>
+          <value>MONTH</value>
+        </entry>
+        <entry>
+          <value>WEEK</value>
+        </entry>
+        <entry>
+          <value>DAY</value>
+        </entry>
+        <entry>
+          <value>HOUR</value>
+        </entry>
+        <entry>
+          <value>MINUTE</value>
+        </entry>
+        <entry>
+          <value>SECOND</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>MINUTE</value>
+    <description>Default Granularity for the segments created by the Druid storage handler, this can be overridden per table using table property druid.segment.granularity </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+   <property>
+    <name>hive.druid.indexer.partition.size.max</name>
+    <value>5000000</value>
+    <description>Maximum number of records per segment partition</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+   <property>
+    <name>hive.druid.indexer.memory.rownum.max</name>
+    <value>75000</value>
+    <description>Maximum number of records in memory while storing data in Druid</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.select.distribute</name>
+    <value>true</value>
+    <description>If it is set to true, we distribute the execution of Druid Select queries</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.druid.basePersistDirectory</name>
+    <value></value>
+    <description>
+      Local temporary directory used to persist intermediate indexing state,
+      if empty (recommended) will default to JVM system property java.io.tmpdir.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.storage.storageDirectory</name>
+    <value>{{druid_storage_dir}}</value>
+    <description>
+      Druid deep storage location for segments.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hive.druid.metadata.db.type</name>
+    <display-name>Druid metadata storage type </display-name>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mysql</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>postgresql</value>
+          <label>POSTGRESQL</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>mysql</value>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <description>Druid metadata storage type</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.passiveWaitTimeMs</name>
+    <value>30000</value>
+    <description>
+      Wait time in ms default to 30 seconds.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.working.directory</name>
+    <value>/tmp/druid-indexing</value>
+    <description>
+      Default hdfs working directory used to store some intermediate metadata.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.maxTries</name>
+    <value>5</value>
+    <description>
+      Maximum number of http call retries before giving up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>hive.druid.bitmap.type</name>
+    <display-name>Druid metadata storage type </display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>roaring</value>
+        </entry>
+        <entry>
+          <value>concise</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <value>roaring</value>
+    <description>Druid Coding algorithm use to encode the bitmaps</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
   <!-- This property is removed in HDP 2.5 and higher. -->
   <property>
     <name>atlas.rest.address</name>

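On the templated values above: {{druid_metadata_password}} and
{{druid_storage_dir}} are placeholders that Ambari fills from the Hive params
(see params_linux.py below). A rough sketch of that substitution with assumed
values; the real resolution happens inside Ambari's config rendering:

  import re

  # assumed param values; in Ambari these come from params_linux.py
  params = {"druid_metadata_password": "secret",
            "druid_storage_dir": "/apps/druid/warehouse"}

  def resolve(value):
      # replace each {{name}} with the matching param, empty if unknown
      return re.sub(r"\{\{(\w+)\}\}", lambda m: params.get(m.group(1), ""), value)

  print(resolve("{{druid_storage_dir}}"))  # -> /apps/druid/warehouse
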
http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
index d2d6437..d84a85d 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
@@ -355,6 +355,7 @@
         <config-type>ranger-hive-security</config-type>
         <config-type>mapred-site</config-type>
         <config-type>application.properties</config-type>
+        <config-type>druid-common</config-type>
       </configuration-dependencies>
     </service>
   </services>

http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 8b3912c..39c06f2 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -854,3 +854,14 @@ if security_enabled:
 # For ldap - hive_check
 hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
 hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
+# For druid metadata password
+druid_metadata_password = ""
+if 'druid-common' in config['configurations'] \
+        and 'druid.metadata.storage.connector.password' in config['configurations']['druid-common']:
+  druid_metadata_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
+
+# For druid storage directory, hive will write segments here
+druid_storage_dir = ""
+if 'druid-common' in config['configurations'] \
+        and 'druid.storage.storageDirectory' in config['configurations']['druid-common']:
+  druid_storage_dir = config['configurations']['druid-common']['druid.storage.storageDirectory']

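The guarded lookups above can also be written with dict.get defaults. A compact
equivalent sketch; the demo config dict is made up:

  # demo stand-in for Ambari's config dictionary (values are made up)
  config = {'configurations': {'druid-common': {
      'druid.metadata.storage.connector.password': 'secret',
      'druid.storage.storageDirectory': '/apps/druid/warehouse'}}}

  druid_common = config['configurations'].get('druid-common', {})
  druid_metadata_password = druid_common.get('druid.metadata.storage.connector.password', "")
  druid_storage_dir = druid_common.get('druid.storage.storageDirectory', "")
  print("%s %s" % (druid_metadata_password, druid_storage_dir))
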
http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 1555581..e9b8d15 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -105,7 +105,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
           # recommend HDFS as default deep storage
           extensions_load_list = self.addToList(extensions_load_list, "druid-hdfs-storage")
           putCommonProperty("druid.storage.type", "hdfs")
-          putCommonProperty("druid.storage.storageDirectory", "/user/druid/data")
+          putCommonProperty("druid.storage.storageDirectory", "/apps/druid/warehouse")
           # configure indexer logs configs
           putCommonProperty("druid.indexer.logs.type", "hdfs")
           putCommonProperty("druid.indexer.logs.directory", "/user/druid/logs")
@@ -556,6 +556,53 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
         putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'delete', 'true')
         putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'delete', 'true')
 
+    # recommend druid-related hive-site properties only when DRUID is in the list of services to be installed
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'DRUID' in servicesList:
+        putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
+        if 'druid-coordinator' in services['configurations']:
+            component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_COORDINATOR', services, hosts)
+            if component_hosts is not None and len(component_hosts) > 0:
+                # pick the first
+                host = component_hosts[0]
+            druid_coordinator_host_port = str(host['Hosts']['host_name']) + ":" + str(
+                services['configurations']['druid-coordinator']['properties']['druid.port'])
+        else:
+            druid_coordinator_host_port = "localhost:8081"
+
+        if 'druid-router' in services['configurations']:
+            component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_ROUTER', services, hosts)
+            if component_hosts is not None and len(component_hosts) > 0:
+                # pick the first
+                host = component_hosts[0]
+            print host
+            druid_broker_host_port = str(host['Hosts']['host_name']) + ":" + str(
+                services['configurations']['druid-router']['properties']['druid.port'])
+        elif 'druid-broker' in services['configurations']:
+            component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_BROKER', services, hosts)
+            if component_hosts is not None and len(component_hosts) > 0:
+                # pick the first
+                host = component_hosts[0]
+            druid_broker_host_port = str(host['Hosts']['host_name']) + ":" + str(
+                services['configurations']['druid-broker']['properties']['druid.port'])
+        else:
+            druid_broker_host_port = "localhost:8083"
+
+        if 'druid-common' in services['configurations']:
+            druid_metadata_uri = services['configurations']['druid-common']['properties']['druid.metadata.storage.connector.connectURI']
+            druid_metadata_type = services['configurations']['druid-common']['properties']['druid.metadata.storage.type']
+            if 'druid.metadata.storage.connector.user' in services['configurations']['druid-common']['properties']:
+                druid_metadata_user = services['configurations']['druid-common']['properties']['druid.metadata.storage.connector.user']
+            else:
+                druid_metadata_user = ""
+
+        putHiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
+        putHiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
+        putHiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
+        putHiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
+        putHiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
+
+
  def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
    super(HDP26StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
    if 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties']:

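The stack advisor hunk repeats one pattern for coordinator, router, and broker:
take the first host running the component and append that config type's
druid.port, falling back to a default when the config type is absent. A
standalone sketch of the pattern with hypothetical inputs; unlike the committed
code, it also guards the case where no component host is found:

  def component_host_port(configurations, component_hosts, config_type, default):
      # pick the first host running the component, if any
      props = configurations.get(config_type, {}).get('properties', {})
      if component_hosts and 'druid.port' in props:
          return "%s:%s" % (component_hosts[0]['Hosts']['host_name'], props['druid.port'])
      return default

  # hypothetical inputs
  configurations = {'druid-router': {'properties': {'druid.port': 8083}}}
  hosts = [{'Hosts': {'host_name': 'c6401.ambari.apache.org'}}]
  print(component_host_port(configurations, hosts, 'druid-router', 'localhost:8083'))
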
http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index a874fa7..4764297 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -75,6 +75,20 @@
            <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
               if-type="cluster-env" if-key="security_enabled" if-value="true"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_maint_druid_config_for_hive_hook" summary="Updating druid hive related properties">
+            <type>hive-site</type>
+            <set key="hive.druid.metadata.password" value="{{druid_metadata_password}}" if-type="druid-common"/>
+            <set key="hive.druid.indexer.segments.granularity" value="MINUTE" if-type="druid-common"/>
+            <set key="hive.druid.indexer.partition.size.max" value="5000000" if-type="druid-common"/>
+            <set key="hive.druid.indexer.memory.rownum.max" value="75000" if-type="druid-common"/>
+            <set key="hive.druid.select.distribute" value="true" if-type="druid-common"/>
+            <set key="hive.druid.basePersistDirectory" value="" if-type="druid-common"/>
+            <set key="hive.druid.storage.storageDirectory" value="{{druid_storage_dir}}" if-type="druid-common"/>
+            <set key="hive.druid.passiveWaitTimeMs" value="30000" if-type="druid-common"/>
+            <set key="hive.druid.working.directory" value="/tmp/druid-indexing" if-type="druid-common"/>
+            <set key="hive.druid.bitmap.type" value="roaring" if-type="druid-common"/>
+
+          </definition>
         </changes>
       </component>
     </service>

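The if-type="druid-common" guard on each set above means the upgrade writes
these hive-site keys only on clusters that actually carry a druid-common config
type. A simplified sketch of that behavior, not Ambari's actual upgrade engine:

  def apply_sets(existing_config_types, hive_site, sets):
      # each entry: (key, value, required config type)
      for key, value, if_type in sets:
          if if_type in existing_config_types:
              hive_site[key] = value

  hive_site = {}
  apply_sets({"druid-common"}, hive_site,
             [("hive.druid.bitmap.type", "roaring", "druid-common"),
              ("hive.druid.passiveWaitTimeMs", "30000", "druid-common")])
  print(hive_site)
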
http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
index 1d49f9a..ca689b0 100644
--- a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
+++ b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
@@ -605,6 +605,7 @@ class TestDruid(RMFTestCase):
                               principal_name = 'missing_principal',
                               user = 'hdfs',
                               owner = 'druid',
+                              group='hadoop',
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               type = 'directory',
                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
@@ -628,7 +629,8 @@ class TestDruid(RMFTestCase):
                               type = 'directory',
                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               dfs_type = '',
-                              mode=0755
+                              group='hadoop',
+                              mode=0775
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
@@ -663,11 +665,12 @@ class TestDruid(RMFTestCase):
                               principal_name = 'missing_principal',
                               user = 'hdfs',
                               owner = 'druid',
+                              group='hadoop',
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               type = 'directory',
                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               dfs_type = '',
-                              mode=0755
+                              mode=0775
                               )
 
     self.assertResourceCalled('HdfsResource', '/user/druid/logs',
@@ -681,6 +684,7 @@ class TestDruid(RMFTestCase):
                               principal_name = 'missing_principal',
                               user = 'hdfs',
                               owner = 'druid',
+                              group='hadoop',
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               type = 'directory',
                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',

http://git-wip-us.apache.org/repos/asf/ambari/blob/79ff23fa/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 3ba18d8..63e2229 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -990,6 +990,22 @@ class TestHDP26StackAdvisor(TestCase):
       },
       "hive-atlas-application.properties" : {
         "properties": {}
+      },
+      "druid-coordinator": {
+        "properties": {'druid.port': 8081}
+      },
+      "druid-broker": {
+        "properties": {'druid.port': 8082}
+      },
+      "druid-common": {
+        "properties": {
+          "database_name": "druid",
+          "metastore_hostname": "c6401.ambari.apache.org",
+          "druid.metadata.storage.type": "mysql",
+          'druid.metadata.storage.connector.port': '3306',
+          'druid.metadata.storage.connector.user': 'druid',
          'druid.metadata.storage.connector.connectURI': 'jdbc:mysql://c6401.ambari.apache.org:3306/druid?createDatabaseIfNotExist=true'
+        }
       }
     }
 
@@ -1053,6 +1069,37 @@ class TestHDP26StackAdvisor(TestCase):
               "service_version": "0.7.0"
             },
             "components": []
+          },
+          {
+            "StackServices": {
+              "service_name": "DRUID",
+            },
+            "components": [
+              {
+                "StackServiceComponents": {
+                  "component_name": "DRUID_COORDINATOR",
+                  "hostnames": ["c6401.ambari.apache.org"]
+                },
+              },
+              {
+                "StackServiceComponents": {
+                  "component_name": "DRUID_OVERLORD",
+                  "hostnames": ["c6401.ambari.apache.org"]
+                },
+              },
+              {
+                "StackServiceComponents": {
+                  "component_name": "DRUID_BROKER",
+                  "hostnames": ["c6401.ambari.apache.org"]
+                },
+              },
+              {
+                "StackServiceComponents": {
+                  "component_name": "DRUID_ROUTER",
+                  "hostnames": ["c6401.ambari.apache.org"]
+                },
+              }
+            ]
           }
         ],
       "Versions": {
@@ -1143,7 +1190,12 @@ class TestHDP26StackAdvisor(TestCase):
          'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider',
           'hive.exec.dynamic.partition.mode': 'strict',
           'hive.optimize.sort.dynamic.partition': 'false',
-          'hive.server2.enable.doAs': 'false'
+          'hive.server2.enable.doAs': 'false',
+          'hive.druid.broker.address.default': 'c6401.ambari.apache.org:8082',
          'hive.druid.coordinator.address.default': 'c6401.ambari.apache.org:8081',
+          'hive.druid.metadata.db.type': 'mysql',
          'hive.druid.metadata.uri': 'jdbc:mysql://c6401.ambari.apache.org:3306/druid?createDatabaseIfNotExist=true',
+          'hive.druid.metadata.username': 'druid',
         },
         'property_attributes': {
           'hive.tez.container.size': {
@@ -1240,6 +1292,12 @@ class TestHDP26StackAdvisor(TestCase):
       'properties': {}
     }
 
+    # case there is router in the stack
+    services['configurations']['druid-router'] = {}
+    services['configurations']['druid-router']['properties'] = {}
+    services['configurations']['druid-router']['properties']['druid.port'] = 8083
+    expected['hive-site']['properties']['hive.druid.broker.address.default'] = 'c6401.ambari.apache.org:8083'
+
     recommendedConfigurations = {}
    self.stackAdvisor.recommendHIVEConfigurations(recommendedConfigurations, clusterData, services, hosts)
     self.assertEquals(recommendedConfigurations, expected)
