http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index 0b03af4..30045f8 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -68,9 +68,6 @@ class HistoryserverWindows(HistoryServer):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HistoryServerDefault(HistoryServer):
-  def get_component_name(self):
-    return "hadoop-mapreduce-historyserver"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
@@ -78,7 +75,7 @@ class HistoryServerDefault(HistoryServer):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-mapreduce-historyserver", params.version)
+      stack_select.select_packages(params.version)
       # MC Hammer said, "Can't touch this"
       copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)

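Every script in this commit follows the same two-part pattern shown above: the per-component get_component_name() override is removed, and the hard-coded stack_select.select("<component>", version) call is replaced with stack_select.select_packages(version), which resolves the affected packages from the stack_select_packages.json mapping introduced later in this commit. As a rough sketch of the lookup such a call implies, assuming a mapping shaped like that JSON (the helper names and signature here are illustrative, not Ambari's actual implementation):

import json
import subprocess

def select_packages(version, mapping_file, service, component, scope="STANDARD"):
  # Load the stack-select mapping and run the selector tool once per package.
  with open(mapping_file) as f:
    mapping = json.load(f)
  packages = mapping["HDP"]["stack-select"][service][component][scope]
  for package in packages:
    if "INVALID" in package:
      # PATCH-scope entries marked INVALID (e.g. shared clients) are skipped
      continue
    # hdp-select is HDP's stack-selector tool; the exact invocation may differ
    subprocess.check_call(["/usr/bin/hdp-select", "set", package, version])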
http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
index 424157b..efcb2da 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
@@ -82,16 +82,13 @@ class MapReduce2ClientWindows(MapReduce2Client):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class MapReduce2ClientDefault(MapReduce2Client):
-  def get_component_name(self):
-    return "hadoop-client"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
+      stack_select.select_packages(params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
index 5acb20b..ed83402 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
@@ -66,9 +66,6 @@ class NodemanagerWindows(Nodemanager):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class NodemanagerDefault(Nodemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-nodemanager"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade pre-restart")
     import params
@@ -76,7 +73,7 @@ class NodemanagerDefault(Nodemanager):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-nodemanager", params.version)
+      stack_select.select_packages(params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index b929af0..ecaea4c 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -105,9 +105,6 @@ class ResourcemanagerWindows(Resourcemanager):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")
     import params
@@ -115,7 +112,7 @@ class ResourcemanagerDefault(Resourcemanager):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+      stack_select.select_packages(params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
index beea8b9..ef4f7ea 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
@@ -51,16 +51,13 @@ class YarnClientWindows(YarnClient):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class YarnClientDefault(YarnClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
+      stack_select.select_packages(params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
index b1e0c16..a435b80 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
@@ -66,9 +66,6 @@ class ApplicationTimelineServerWindows(ApplicationTimelineServer):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ApplicationTimelineServerDefault(ApplicationTimelineServer):
-  def get_component_name(self):
-    return "hadoop-yarn-timelineserver"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
@@ -76,7 +73,7 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-timelineserver", params.version)
+      stack_select.select_packages(params.version)
 
   def status(self, env):
     import status_params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
index d886244..3938c15 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
@@ -68,9 +68,6 @@ class HistoryserverWindows(HistoryServer):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HistoryServerDefault(HistoryServer):
-  def get_component_name(self):
-    return "hadoop-mapreduce-historyserver"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
@@ -78,7 +75,7 @@ class HistoryServerDefault(HistoryServer):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-mapreduce-historyserver", params.version)
+      stack_select.select_packages(params.version)
       # MC Hammer said, "Can't touch this"
       copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
index 424157b..efcb2da 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
@@ -82,16 +82,13 @@ class MapReduce2ClientWindows(MapReduce2Client):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class MapReduce2ClientDefault(MapReduce2Client):
-  def get_component_name(self):
-    return "hadoop-client"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
+      stack_select.select_packages(params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
index 5acb20b..ed83402 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
@@ -66,9 +66,6 @@ class NodemanagerWindows(Nodemanager):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class NodemanagerDefault(Nodemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-nodemanager"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade pre-restart")
     import params
@@ -76,7 +73,7 @@ class NodemanagerDefault(Nodemanager):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-nodemanager", params.version)
+      stack_select.select_packages(params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
index 961fe63..4ceff1c 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
@@ -105,9 +105,6 @@ class ResourcemanagerWindows(Resourcemanager):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")
     import params
@@ -115,7 +112,7 @@ class ResourcemanagerDefault(Resourcemanager):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+      stack_select.select_packages(params.version)
 
   def disable_security(self, env):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
index beea8b9..ef4f7ea 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
@@ -51,16 +51,13 @@ class YarnClientWindows(YarnClient):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class YarnClientDefault(YarnClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
+      stack_select.select_packages(params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
index ba46dc8..f8f6e3d 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -40,9 +40,6 @@ from resource_management.libraries.script.script import Script
 
 class Master(Script):
 
-  def get_component_name(self):
-    return "zeppelin-server"
-
   def install(self, env):
     import params
     env.set_params(params)
@@ -255,7 +252,7 @@ class Master(Script):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
       conf_select.select(params.stack_name, "zeppelin", params.version)
-      stack_select.select("zeppelin-server", params.version)
+      stack_select.select_packages(params.version)
 
   def set_interpreter_settings(self, config_data):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/package/scripts/master.py
index c2f81639..c4fdfcc 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.3.0/package/scripts/master.py
@@ -40,9 +40,6 @@ from resource_management.libraries.script.script import Script
 
 class Master(Script):
 
-  def get_component_name(self):
-    return "zeppelin-server"
-
   def install(self, env):
     import params
     env.set_params(params)
@@ -257,7 +254,7 @@ class Master(Script):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
       conf_select.select(params.stack_name, "zeppelin", params.version)
-      stack_select.select("zeppelin-server", params.version)
+      stack_select.select_packages(params.version)
 
   def set_interpreter_settings(self, config_data):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_client.py
index 3137f1a..e52522a 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_client.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_client.py
@@ -56,8 +56,6 @@ class ZookeeperClient(Script):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ZookeeperClientLinux(ZookeeperClient):
-  def get_component_name(self):
-    return "zookeeper-client"
 
   def install(self, env):
     self.install_packages(env)
@@ -70,7 +68,7 @@ class ZookeeperClientLinux(ZookeeperClient):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      stack_select.select("zookeeper-client", params.version)
+      stack_select.select_packages(params.version)
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ZookeeperClientWindows(ZookeeperClient):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
index 8d9de9e..8d6acd9 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
@@ -64,9 +64,6 @@ class ZookeeperServer(Script):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ZookeeperServerLinux(ZookeeperServer):
 
-  def get_component_name(self):
-    return "zookeeper-server"
-
   def install(self, env):
     self.install_packages(env)
     self.configure(env)
@@ -78,7 +75,7 @@ class ZookeeperServerLinux(ZookeeperServer):
 
     if check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      stack_select.select("zookeeper-server", params.version)
+      stack_select.select_packages(params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     # during an express upgrade, there is no quorum, so don't try to perform the check

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index e6ec285..1915e9f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -262,6 +262,21 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <!-- Define stack_select_packages property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_select_packages</name>
+    <value/>
+    <description>Associations between component and stack-select tools.</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_select_packages.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
   <property>
     <name>stack_root</name>
     <value>{"HDP":"/usr/hdp"}</value>

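The VALUE_FROM_PROPERTY_FILE property type above tells the server to populate the property's value from the named JSON file shipped with the stack, so cluster-env delivers the whole mapping to the agents. A hedged sketch of how an agent-side script could read it back (the configuration keys are taken from the XML above; in practice this parsing is hidden behind stack_select.get_packages()):

import json
from resource_management.libraries.script import Script

config = Script.get_config()
raw = config['configurations']['cluster-env']['stack_select_packages']
# The property value is the serialized content of stack_select_packages.json
stack_select_mapping = json.loads(raw) if raw else {}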
http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
index 8a583b3..8bae9e6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
@@ -28,7 +28,7 @@ class AfterInstallHook(Hook):
     import params
 
     env.set_params(params)
-    setup_stack_symlinks()
+    setup_stack_symlinks(self.stroutfile)
     setup_config()
 
     link_configs(self.stroutfile)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index 0dfde27..b517eba 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -43,9 +43,6 @@ stack_version_unformatted = config['hostLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 major_stack_version = get_major_version(stack_version_formatted)
 
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-
 # service name
 service_name = config['serviceName']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
index 36a202f..8b61a93 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -27,13 +27,12 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script import Script
 
 
-def setup_stack_symlinks():
+def setup_stack_symlinks(struct_out_file):
   """
   Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
   stack version, such as "2.3". This should always be called after a component has been
@@ -42,18 +41,30 @@ def setup_stack_symlinks():
   :return:
   """
   import params
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    # try using the exact version first, falling back in just the stack if it's not defined
-    # which would only be during an intial cluster installation
-    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
-
-    if not params.upgrade_suspended:
-      if params.host_sys_prepped:
-        Logger.warning("Skipping running stack-selector-tool for stack {0} as its a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.".format(version))
-        return
-      # On parallel command execution this should be executed by a single process at a time.
-      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-        stack_select.select_all(version)
+  if params.upgrade_suspended:
+    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
+    return
+
+  if params.host_sys_prepped:
+    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host.")
+    return
+
+  # get the packages which the stack-select tool should be used on
+  stack_select_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
+  if stack_select_packages is None:
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for package in stack_select_packages:
+      stack_select.select(package, json_version)
+
 
 def setup_config():
   import params

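The load_version() helper used above is not part of this hunk; it reads the component's advertised version out of the structured output file handed in by the hook. A plausible sketch of such a helper, assuming the structured output is JSON with a top-level "version" field:

import json
import os

def load_version(struct_out_file):
  # Return the advertised version recorded in the structured output file, or None.
  if not os.path.exists(struct_out_file):
    return None
  try:
    with open(struct_out_file, 'r') as f:
      return json.load(f).get("version")
  except (IOError, ValueError):
    return None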
http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_select_packages.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_select_packages.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_select_packages.json
new file mode 100644
index 0000000..2747188
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_select_packages.json
@@ -0,0 +1,952 @@
+{
+  "HDP": {
+    "stack-select": {
+      "ACCUMULO": {
+        "ACCUMULO_CLIENT": {
+          "STACK-SELECT-PACKAGE": "accumulo-client",
+          "INSTALL": [
+            "accumulo-client"
+          ],
+          "PATCH": [
+            "accumulo-client"
+          ],
+          "STANDARD": [
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_GC": {
+          "STACK-SELECT-PACKAGE": "accumulo-gc",
+          "INSTALL": [
+            "accumulo-gc"
+          ],
+          "PATCH": [
+            "accumulo-gc"
+          ],
+          "STANDARD": [
+            "accumulo-gc",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_MASTER": {
+          "STACK-SELECT-PACKAGE": "accumulo-master",
+          "INSTALL": [
+            "accumulo-master"
+          ],
+          "PATCH": [
+            "accumulo-master"
+          ],
+          "STANDARD": [
+            "accumulo-master",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_MONITOR": {
+          "STACK-SELECT-PACKAGE": "accumulo-monitor",
+          "INSTALL": [
+            "accumulo-monitor"
+          ],
+          "PATCH": [
+            "accumulo-monitor"
+          ],
+          "STANDARD": [
+            "accumulo-monitor",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_TRACER": {
+          "STACK-SELECT-PACKAGE": "accumulo-tracer",
+          "INSTALL": [
+            "accumulo-tracer"
+          ],
+          "PATCH": [
+            "accumulo-tracer"
+          ],
+          "STANDARD": [
+            "accumulo-tracer",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_TSERVER": {
+          "STACK-SELECT-PACKAGE": "accumulo-tablet",
+          "INSTALL": [
+            "accumulo-tablet"
+          ],
+          "PATCH": [
+            "accumulo-tablet"
+          ],
+          "STANDARD": [
+            "accumulo-tablet",
+            "accumulo-client"
+          ]
+        }
+      },
+      "ATLAS": {
+        "ATLAS_CLIENT": {
+          "STACK-SELECT-PACKAGE": "atlas-client",
+          "INSTALL": [
+            "atlas-client"
+          ],
+          "PATCH": [
+            "atlas-client"
+          ],
+          "STANDARD": [
+            "atlas-client"
+          ]
+        },
+        "ATLAS_SERVER": {
+          "STACK-SELECT-PACKAGE": "atlas-server",
+          "INSTALL": [
+            "atlas-server"
+          ],
+          "PATCH": [
+            "atlas-server"
+          ],
+          "STANDARD": [
+            "atlas-server"
+          ]
+        }
+      },
+      "DRUID": {
+        "DRUID_COORDINATOR": {
+          "STACK-SELECT-PACKAGE": "druid-coordinator",
+          "INSTALL": [
+            "druid-coordinator"
+          ],
+          "PATCH": [
+            "druid-coordinator"
+          ],
+          "STANDARD": [
+            "druid-coordinator"
+          ]
+        },
+        "DRUID_OVERLORD": {
+          "STACK-SELECT-PACKAGE": "druid-overlord",
+          "INSTALL": [
+            "druid-overlord"
+          ],
+          "PATCH": [
+            "druid-overlord"
+          ],
+          "STANDARD": [
+            "druid-overlord"
+          ]
+        },
+        "DRUID_HISTORICAL": {
+          "STACK-SELECT-PACKAGE": "druid-historical",
+          "INSTALL": [
+            "druid-historical"
+          ],
+          "PATCH": [
+            "druid-historical"
+          ],
+          "STANDARD": [
+            "druid-historical"
+          ]
+        },
+        "DRUID_BROKER": {
+          "STACK-SELECT-PACKAGE": "druid-broker",
+          "INSTALL": [
+            "druid-broker"
+          ],
+          "PATCH": [
+            "druid-broker"
+          ],
+          "STANDARD": [
+            "druid-broker"
+          ]
+        },
+        "DRUID_MIDDLEMANAGER": {
+          "STACK-SELECT-PACKAGE": "druid-middlemanager",
+          "INSTALL": [
+            "druid-middlemanager"
+          ],
+          "PATCH": [
+            "druid-middlemanager"
+          ],
+          "STANDARD": [
+            "druid-middlemanager"
+          ]
+        },
+        "DRUID_ROUTER": {
+          "STACK-SELECT-PACKAGE": "druid-router",
+          "INSTALL": [
+            "druid-router"
+          ],
+          "PATCH": [
+            "druid-router"
+          ],
+          "STANDARD": [
+            "druid-router"
+          ]
+        },
+        "DRUID_SUPERSET": {
+          "STACK-SELECT-PACKAGE": "druid-superset",
+          "INSTALL": [
+            "druid-superset"
+          ],
+          "PATCH": [
+            "druid-superset"
+          ],
+          "STANDARD": [
+            "druid-superset"
+          ]
+        }
+      },
+      "FALCON": {
+        "FALCON_CLIENT": {
+          "STACK-SELECT-PACKAGE": "falcon-client",
+          "INSTALL": [
+            "falcon-client"
+          ],
+          "PATCH": [
+            "falcon-client"
+          ],
+          "STANDARD": [
+            "falcon-client"
+          ]
+        },
+        "FALCON_SERVER": {
+          "STACK-SELECT-PACKAGE": "falcon-server",
+          "INSTALL": [
+            "falcon-server"
+          ],
+          "PATCH": [
+            "falcon-server"
+          ],
+          "STANDARD": [
+            "falcon-server"
+          ]
+        }
+      },
+      "FLUME": {
+        "FLUME_HANDLER": {
+          "STACK-SELECT-PACKAGE": "flume-server",
+          "INSTALL": [
+            "flume-server"
+          ],
+          "PATCH": [
+            "flume-server"
+          ],
+          "STANDARD": [
+            "flume-server"
+          ]
+        }
+      },
+      "HBASE": {
+        "HBASE_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hbase-client",
+          "INSTALL": [
+            "hbase-client"
+          ],
+          "PATCH": [
+            "hbase-client"
+          ],
+          "STANDARD": [
+            "hbase-client",
+            "phoenix-client",
+            "hadoop-client"
+          ]
+        },
+        "HBASE_MASTER": {
+          "STACK-SELECT-PACKAGE": "hbase-master",
+          "INSTALL": [
+            "hbase-master"
+          ],
+          "PATCH": [
+            "hbase-master"
+          ],
+          "STANDARD": [
+            "hbase-master"
+          ]
+        },
+        "HBASE_REGIONSERVER": {
+          "STACK-SELECT-PACKAGE": "hbase-regionserver",
+          "INSTALL": [
+            "hbase-regionserver"
+          ],
+          "PATCH": [
+            "hbase-regionserver"
+          ],
+          "STANDARD": [
+            "hbase-regionserver"
+          ]
+        },
+        "PHOENIX_QUERY_SERVER": {
+          "STACK-SELECT-PACKAGE": "phoenix-server",
+          "INSTALL": [
+            "phoenix-server"
+          ],
+          "PATCH": [
+            "phoenix-server"
+          ],
+          "STANDARD": [
+            "phoenix-server"
+          ]
+        }
+      },
+      "HDFS": {
+        "DATANODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-datanode",
+          "INSTALL": [
+            "hadoop-hdfs-datanode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-datanode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-datanode"
+          ]
+        },
+        "HDFS_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        },
+        "NAMENODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-namenode",
+          "INSTALL": [
+            "hadoop-hdfs-namenode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-namenode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-namenode"
+          ]
+        },
+        "NFS_GATEWAY": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-nfs3",
+          "INSTALL": [
+            "hadoop-hdfs-nfs3"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-nfs3"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-nfs3"
+          ]
+        },
+        "JOURNALNODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-journalnode",
+          "INSTALL": [
+            "hadoop-hdfs-journalnode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-journalnode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-journalnode"
+          ]
+        },
+        "SECONDARY_NAMENODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-secondarynamenode",
+          "INSTALL": [
+            "hadoop-hdfs-secondarynamenode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-secondarynamenode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-secondarynamenode"
+          ]
+        },
+        "ZKFC": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-zkfc",
+          "INSTALL": [
+            "hadoop-hdfs-zkfc"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-zkfc"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-zkfc"
+          ]
+        }
+      },
+      "HIVE": {
+        "HCAT": {
+          "STACK-SELECT-PACKAGE": "hive-webhcat",
+          "INSTALL": [
+            "hive-webhcat"
+          ],
+          "PATCH": [
+            "hive-webhcat"
+          ],
+          "STANDARD": [
+            "hive-webhcat"
+          ]
+        },
+        "HIVE_METASTORE": {
+          "STACK-SELECT-PACKAGE": "hive-metastore",
+          "INSTALL": [
+            "hive-metastore"
+          ],
+          "PATCH": [
+            "hive-metastore"
+          ],
+          "STANDARD": [
+            "hive-metastore"
+          ]
+        },
+        "HIVE_SERVER": {
+          "STACK-SELECT-PACKAGE": "hive-server2",
+          "INSTALL": [
+            "hive-server2"
+          ],
+          "PATCH": [
+            "hive-server2"
+          ],
+          "STANDARD": [
+            "hive-server2"
+          ]
+        },
+        "HIVE_SERVER_INTERACTIVE": {
+          "STACK-SELECT-PACKAGE": "hive-server2-hive2",
+          "INSTALL": [
+            "hive-server2-hive2"
+          ],
+          "PATCH": [
+            "hive-server2-hive2"
+          ],
+          "STANDARD": [
+            "hive-server2-hive2"
+          ]
+        },
+        "HIVE_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        },
+        "WEBHCAT_SERVER": {
+          "STACK-SELECT-PACKAGE": "hive-webhcat",
+          "INSTALL": [
+            "hive-webhcat"
+          ],
+          "PATCH": [
+            "hive-webhcat"
+          ],
+          "STANDARD": [
+            "hive-webhcat"
+          ]
+        }
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": {
+          "STACK-SELECT-PACKAGE": "kafka-broker",
+          "INSTALL": [
+            "kafka-broker"
+          ],
+          "PATCH": [
+            "kafka-broker"
+          ],
+          "STANDARD": [
+            "kafka-broker"
+          ]
+        }
+      },
+      "KNOX": {
+        "KNOX_GATEWAY": {
+          "STACK-SELECT-PACKAGE": "knox-server",
+          "INSTALL": [
+            "knox-server"
+          ],
+          "PATCH": [
+            "knox-server"
+          ],
+          "STANDARD": [
+            "knox-server"
+          ]
+        }
+      },
+      "MAHOUT": {
+        "MAHOUT": {
+          "STACK-SELECT-PACKAGE": "mahout-client",
+          "INSTALL": [
+            "mahout-client"
+          ],
+          "PATCH": [
+            "mahout-client"
+          ],
+          "STANDARD": [
+            "mahout-client"
+          ]
+        }
+      },
+      "MAPREDUCE2": {
+        "HISTORYSERVER": {
+          "STACK-SELECT-PACKAGE": "hadoop-mapreduce-historyserver",
+          "INSTALL": [
+            "hadoop-mapreduce-historyserver"
+          ],
+          "PATCH": [
+            "hadoop-mapreduce-historyserver"
+          ],
+          "STANDARD": [
+            "hadoop-mapreduce-historyserver"
+          ]
+        },
+        "MAPREDUCE2_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "hadoop-mapreduce-INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "OOZIE": {
+        "OOZIE_CLIENT": {
+          "STACK-SELECT-PACKAGE": "oozie-client",
+          "INSTALL": [
+            "oozie-client"
+          ],
+          "PATCH": [
+            "oozie-client"
+          ],
+          "STANDARD": [
+            "oozie-client"
+          ]
+        },
+        "OOZIE_SERVER": {
+          "STACK-SELECT-PACKAGE": "oozie-server",
+          "INSTALL": [
+            "oozie-client",
+            "oozie-server"
+          ],
+          "PATCH": [
+            "oozie-server"
+          ],
+          "STANDARD": [
+            "oozie-client",
+            "oozie-server"
+          ]
+        }
+      },
+      "PIG": {
+        "PIG": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "R4ML": {
+        "R4ML": {
+          "STACK-SELECT-PACKAGE": "r4ml-client",
+          "INSTALL": [
+            "r4ml-client"
+          ],
+          "PATCH": [
+            "r4ml-client"
+          ],
+          "STANDARD": [
+            "r4ml-client"
+          ]
+        }
+      },
+      "RANGER": {
+        "RANGER_ADMIN": {
+          "STACK-SELECT-PACKAGE": "ranger-admin",
+          "INSTALL": [
+            "ranger-admin"
+          ],
+          "PATCH": [
+            "ranger-admin"
+          ],
+          "STANDARD": [
+            "ranger-admin"
+          ]
+        },
+        "RANGER_TAGSYNC": {
+          "STACK-SELECT-PACKAGE": "ranger-tagsync",
+          "INSTALL": [
+            "ranger-tagsync"
+          ],
+          "PATCH": [
+            "ranger-tagsync"
+          ],
+          "STANDARD": [
+            "ranger-tagsync"
+          ]
+        },
+        "RANGER_USERSYNC": {
+          "STACK-SELECT-PACKAGE": "ranger-usersync",
+          "INSTALL": [
+            "ranger-usersync"
+          ],
+          "PATCH": [
+            "ranger-usersync"
+          ],
+          "STANDARD": [
+            "ranger-usersync"
+          ]
+        }
+      },
+      "RANGER_KMS": {
+        "RANGER_KMS_SERVER": {
+          "STACK-SELECT-PACKAGE": "ranger-kms",
+          "INSTALL": [
+            "ranger-kms"
+          ],
+          "PATCH": [
+            "ranger-kms"
+          ],
+          "STANDARD": [
+            "ranger-kms"
+          ]
+        }
+      },
+      "SLIDER": {
+        "SLIDER": {
+          "STACK-SELECT-PACKAGE": "slider-client",
+          "INSTALL": [
+            "slider-client"
+          ],
+          "PATCH": [
+            "slider-client"
+          ],
+          "STANDARD": [
+            "slider-client",
+            "hadoop-client"
+          ]
+        }
+      },
+      "SPARK": {
+        "LIVY_SERVER": {
+          "STACK-SELECT-PACKAGE": "livy-server",
+          "INSTALL": [
+            "livy-server"
+          ],
+          "PATCH": [
+            "livy-server"
+          ],
+          "STANDARD": [
+            "livy-server"
+          ]
+        },
+        "SPARK_CLIENT": {
+          "STACK-SELECT-PACKAGE": "spark-client",
+          "INSTALL": [
+            "spark-client"
+          ],
+          "PATCH": [
+            "spark-client"
+          ],
+          "STANDARD": [
+            "spark-client"
+          ]
+        },
+        "SPARK_JOBHISTORYSERVER": {
+          "STACK-SELECT-PACKAGE": "spark-historyserver",
+          "INSTALL": [
+            "spark-historyserver"
+          ],
+          "PATCH": [
+            "spark-historyserver"
+          ],
+          "STANDARD": [
+            "spark-historyserver"
+          ]
+        },
+        "SPARK_THRIFTSERVER": {
+          "STACK-SELECT-PACKAGE": "spark-thriftserver",
+          "INSTALL": [
+            "spark-thriftserver"
+          ],
+          "PATCH": [
+            "spark-thriftserver"
+          ],
+          "STANDARD": [
+            "spark-thriftserver"
+          ]
+        }
+      },
+      "SPARK2": {
+        "LIVY2_SERVER": {
+          "STACK-SELECT-PACKAGE": "livy2-server",
+          "INSTALL": [
+            "livy2-server"
+          ],
+          "PATCH": [
+            "livy2-server"
+          ],
+          "STANDARD": [
+            "livy2-server"
+          ]
+        },
+        "SPARK2_CLIENT": {
+          "STACK-SELECT-PACKAGE": "spark2-client",
+          "INSTALL": [
+            "spark2-client"
+          ],
+          "PATCH": [
+            "spark2-client"
+          ],
+          "STANDARD": [
+            "spark2-client"
+          ]
+        },
+        "SPARK2_JOBHISTORYSERVER": {
+          "STACK-SELECT-PACKAGE": "spark2-historyserver",
+          "INSTALL": [
+            "spark2-historyserver"
+          ],
+          "PATCH": [
+            "spark2-historyserver"
+          ],
+          "STANDARD": [
+            "spark2-historyserver"
+          ]
+        },
+        "SPARK2_THRIFTSERVER": {
+          "STACK-SELECT-PACKAGE": "spark2-thriftserver",
+          "INSTALL": [
+            "spark2-thriftserver"
+          ],
+          "PATCH": [
+            "spark2-thriftserver"
+          ],
+          "STANDARD": [
+            "spark2-thriftserver"
+          ]
+        }
+      },
+      "SQOOP": {
+        "SQOOP": {
+          "STACK-SELECT-PACKAGE": "sqoop-client",
+          "INSTALL": [
+            "sqoop-client"
+          ],
+          "PATCH": [
+            "sqoop-client"
+          ],
+          "STANDARD": [
+            "sqoop-client"
+          ]
+        }
+      },
+      "STORM": {
+        "NIMBUS": {
+          "STACK-SELECT-PACKAGE": "storm-nimbus",
+          "INSTALL": [
+            "storm-client",
+            "storm-nimbus"
+          ],
+          "PATCH": [
+            "storm-client",
+            "storm-nimbus"
+          ],
+          "STANDARD": [
+            "storm-client",
+            "storm-nimbus"
+          ]
+        },
+        "SUPERVISOR": {
+          "STACK-SELECT-PACKAGE": "storm-supervisor",
+          "INSTALL": [
+            "storm-supervisor"
+          ],
+          "PATCH": [
+            "storm-supervisor"
+          ],
+          "STANDARD": [
+            "storm-client",
+            "storm-supervisor"
+          ]
+        },
+        "DRPC_SERVER": {
+          "STACK-SELECT-PACKAGE": "storm-client",
+          "INSTALL": [
+            "storm-client"
+          ],
+          "PATCH": [
+            "storm-client"
+          ],
+          "STANDARD": [
+            "storm-client"
+          ]
+        },
+        "STORM_UI_SERVER": {
+          "STACK-SELECT-PACKAGE": "storm-client",
+          "INSTALL": [
+            "storm-client"
+          ],
+          "PATCH": [
+            "storm-client"
+          ],
+          "STANDARD": [
+            "storm-client"
+          ]
+        }
+      },
+      "SYSTEMML": {
+        "SYSTEMML": {
+          "STACK-SELECT-PACKAGE": "systemml-client",
+          "INSTALL": [
+            "systemml-client"
+          ],
+          "PATCH": [
+            "systemml-client"
+          ],
+          "STANDARD": [
+            "systemml-client"
+          ]
+        }
+      },
+      "TEZ": {
+        "TEZ_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "TITAN": {
+        "TITAN_CLIENT": {
+          "STACK-SELECT-PACKAGE": "titan-client",
+          "INSTALL": [
+            "titan-client"
+          ],
+          "PATCH": [
+            "titan-client"
+          ],
+          "STANDARD": [
+            "titan-client"
+          ]
+        },
+        "TITAN_SERVER": {
+          "STACK-SELECT-PACKAGE": "titan-server",
+          "INSTALL": [
+            "titan-server"
+          ],
+          "PATCH": [
+            "titan-server"
+          ],
+          "STANDARD": [
+            "titan-server"
+          ]
+        }
+      },
+      "YARN": {
+        "APP_TIMELINE_SERVER": {
+          "STACK-SELECT-PACKAGE": "hadoop-yarn-timelineserver",
+          "INSTALL": [
+            "hadoop-yarn-timelineserver"
+          ],
+          "PATCH": [
+            "hadoop-yarn-timelineserver"
+          ],
+          "STANDARD": [
+            "hadoop-yarn-timelineserver"
+          ]
+        },
+        "NODEMANAGER": {
+          "STACK-SELECT-PACKAGE": "hadoop-yarn-nodemanager",
+          "INSTALL": [
+            "hadoop-yarn-nodemanager"
+          ],
+          "PATCH": [
+            "hadoop-yarn-nodemanager"
+          ],
+          "STANDARD": [
+            "hadoop-yarn-nodemanager"
+          ]
+        },
+        "RESOURCEMANAGER": {
+          "STACK-SELECT-PACKAGE": "hadoop-yarn-resourcemanager",
+          "INSTALL": [
+            "hadoop-yarn-resourcemanager"
+          ],
+          "PATCH": [
+            "hadoop-yarn-resourcemanager"
+          ],
+          "STANDARD": [
+            "hadoop-yarn-resourcemanager"
+          ]
+        },
+        "YARN_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "ZEPPELIN": {
+        "ZEPPELIN_MASTER": {
+          "STACK-SELECT-PACKAGE": "zeppelin-server",
+          "INSTALL": [
+            "zeppelin-server"
+          ],
+          "PATCH": [
+            "zeppelin-server"
+          ],
+          "STANDARD": [
+            "zeppelin-server"
+          ]
+        }
+      },
+      "ZOOKEEPER": {
+        "ZOOKEEPER_CLIENT": {
+          "STACK-SELECT-PACKAGE": "zookeeper-client",
+          "INSTALL": [
+            "zookeeper-client"
+          ],
+          "PATCH": [
+            "zookeeper-client"
+          ],
+          "STANDARD": [
+            "zookeeper-client"
+          ]
+        },
+        "ZOOKEEPER_SERVER": {
+          "STACK-SELECT-PACKAGE": "zookeeper-server",
+          "INSTALL": [
+            "zookeeper-server"
+          ],
+          "PATCH": [
+            "zookeeper-server"
+          ],
+          "STANDARD": [
+            "zookeeper-server"
+          ]
+        }
+      }
+    }
+  }
+}
\ No newline at end of file

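Each component entry in the file above pairs a STACK-SELECT-PACKAGE name with three scopes: INSTALL (packages re-pointed right after installation), PATCH (packages re-pointed during a patch upgrade, where "INVALID" marks shared packages such as hadoop-client that a patch must not move), and STANDARD (the full set for a normal stack upgrade). A small sketch that walks the mapping to show which components share one selector package (the file path is illustrative):

import json
from collections import defaultdict

with open("stack_select_packages.json") as f:
  mapping = json.load(f)["HDP"]["stack-select"]

shared = defaultdict(list)
for service, components in mapping.items():
  for component, entry in components.items():
    shared[entry["STACK-SELECT-PACKAGE"]].append((service, component))

for package, owners in sorted(shared.items()):
  if len(owners) > 1:
    # e.g. hadoop-client is shared by HDFS_CLIENT, HIVE_CLIENT, PIG, TEZ_CLIENT, ...
    print("{0} -> {1}".format(package, owners))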
http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
index ca3be1d..c2e2971 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
@@ -250,6 +250,21 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <!-- Define stack_select_packages property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_select_packages</name>
+    <value/>
+    <description>Associations between component and stack-select tools.</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_select_packages.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
   <property>
     <name>stack_root</name>
     <value>{"HDP":"/usr/hdp"}</value>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
index 8a583b3..8bae9e6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
@@ -28,7 +28,7 @@ class AfterInstallHook(Hook):
     import params
 
     env.set_params(params)
-    setup_stack_symlinks()
+    setup_stack_symlinks(self.stroutfile)
     setup_config()
 
     link_configs(self.stroutfile)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
index 36a202f..0ffd5a5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -33,7 +33,7 @@ from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script import Script
 
 
-def setup_stack_symlinks():
+def setup_stack_symlinks(struct_out_file):
   """
   Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
   stack version, such as "2.3". This should always be called after a component has been
@@ -42,18 +42,30 @@ def setup_stack_symlinks():
   :return:
   """
   import params
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    # try using the exact version first, falling back in just the stack if it's not defined
-    # which would only be during an intial cluster installation
-    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
-
-    if not params.upgrade_suspended:
-      if params.host_sys_prepped:
-        Logger.warning("Skipping running stack-selector-tool for stack {0} as its a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.".format(version))
-        return
-      # On parallel command execution this should be executed by a single 
process at a time.
-      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = 
params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-        stack_select.select_all(version)
+  if params.upgrade_suspended:
+    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
+    return
+
+  if params.host_sys_prepped:
+    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host.")
+    return
+
+  # get the packages which the stack-select tool should be used on
+  stack_select_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
+  if stack_select_packages is None:
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for package in stack_select_packages:
+      stack_select.select(package, json_version)
+
 
 def setup_config():
   import params
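
load_version is not shown in this hunk; a plausible sketch of its behavior, assuming the structured-output file written by the component is JSON carrying an advertised "version" key:

  import json

  def load_version(struct_out_file):
    # Read the version the component advertised into its structured
    # output file; return None if the file is missing or unparseable.
    try:
      with open(struct_out_file, "r") as fp:
        return json.load(fp).get("version")
    except (IOError, ValueError):
      return None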

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_select_packages.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_select_packages.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_select_packages.json
new file mode 100644
index 0000000..0ad2626
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_select_packages.json
@@ -0,0 +1,848 @@
+{
+  "HDP": {
+    "stack-select": {
+      "ACCUMULO": {
+        "ACCUMULO_CLIENT": {
+          "STACK-SELECT-PACKAGE": "accumulo-client",
+          "INSTALL": [
+            "accumulo-client"
+          ],
+          "PATCH": [
+            "accumulo-client"
+          ],
+          "STANDARD": [
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_GC": {
+          "STACK-SELECT-PACKAGE": "accumulo-gc",
+          "INSTALL": [
+            "accumulo-gc"
+          ],
+          "PATCH": [
+            "accumulo-gc"
+          ],
+          "STANDARD": [
+            "accumulo-gc",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_MASTER": {
+          "STACK-SELECT-PACKAGE": "accumulo-master",
+          "INSTALL": [
+            "accumulo-master"
+          ],
+          "PATCH": [
+            "accumulo-master"
+          ],
+          "STANDARD": [
+            "accumulo-master",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_MONITOR": {
+          "STACK-SELECT-PACKAGE": "accumulo-monitor",
+          "INSTALL": [
+            "accumulo-monitor"
+          ],
+          "PATCH": [
+            "accumulo-monitor"
+          ],
+          "STANDARD": [
+            "accumulo-monitor",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_TRACER": {
+          "STACK-SELECT-PACKAGE": "accumulo-tracer",
+          "INSTALL": [
+            "accumulo-tracer"
+          ],
+          "PATCH": [
+            "accumulo-tracer"
+          ],
+          "STANDARD": [
+            "accumulo-tracer",
+            "accumulo-client"
+          ]
+        },
+        "ACCUMULO_TSERVER": {
+          "STACK-SELECT-PACKAGE": "accumulo-tablet",
+          "INSTALL": [
+            "accumulo-tablet"
+          ],
+          "PATCH": [
+            "accumulo-tablet"
+          ],
+          "STANDARD": [
+            "accumulo-tablet",
+            "accumulo-client"
+          ]
+        }
+      },
+      "ATLAS": {
+        "ATLAS_CLIENT": {
+          "STACK-SELECT-PACKAGE": "atlas-client",
+          "INSTALL": [
+            "atlas-client"
+          ],
+          "PATCH": [
+            "atlas-client"
+          ],
+          "STANDARD": [
+            "atlas-client"
+          ]
+        },
+        "ATLAS_SERVER": {
+          "STACK-SELECT-PACKAGE": "atlas-server",
+          "INSTALL": [
+            "atlas-server"
+          ],
+          "PATCH": [
+            "atlas-server"
+          ],
+          "STANDARD": [
+            "atlas-server"
+          ]
+        }
+      },
+      "DRUID": {
+        "DRUID_COORDINATOR": {
+          "STACK-SELECT-PACKAGE": "druid-coordinator",
+          "INSTALL": [
+            "druid-coordinator"
+          ],
+          "PATCH": [
+            "druid-coordinator"
+          ],
+          "STANDARD": [
+            "druid-coordinator"
+          ]
+        },
+        "DRUID_OVERLORD": {
+          "STACK-SELECT-PACKAGE": "druid-overlord",
+          "INSTALL": [
+            "druid-overlord"
+          ],
+          "PATCH": [
+            "druid-overlord"
+          ],
+          "STANDARD": [
+            "druid-overlord"
+          ]
+        },
+        "DRUID_HISTORICAL": {
+          "STACK-SELECT-PACKAGE": "druid-historical",
+          "INSTALL": [
+            "druid-historical"
+          ],
+          "PATCH": [
+            "druid-historical"
+          ],
+          "STANDARD": [
+            "druid-historical"
+          ]
+        },
+        "DRUID_BROKER": {
+          "STACK-SELECT-PACKAGE": "druid-broker",
+          "INSTALL": [
+            "druid-broker"
+          ],
+          "PATCH": [
+            "druid-broker"
+          ],
+          "STANDARD": [
+            "druid-broker"
+          ]
+        },
+        "DRUID_MIDDLEMANAGER": {
+          "STACK-SELECT-PACKAGE": "druid-middlemanager",
+          "INSTALL": [
+            "druid-middlemanager"
+          ],
+          "PATCH": [
+            "druid-middlemanager"
+          ],
+          "STANDARD": [
+            "druid-middlemanager"
+          ]
+        },
+        "DRUID_ROUTER": {
+          "STACK-SELECT-PACKAGE": "druid-router",
+          "INSTALL": [
+            "druid-router"
+          ],
+          "PATCH": [
+            "druid-router"
+          ],
+          "STANDARD": [
+            "druid-router"
+          ]
+        },
+        "DRUID_SUPERSET": {
+          "STACK-SELECT-PACKAGE": "druid-superset",
+          "INSTALL": [
+            "druid-superset"
+          ],
+          "PATCH": [
+            "druid-superset"
+          ],
+          "STANDARD": [
+            "druid-superset"
+          ]
+        }
+      },
+      "FALCON": {
+        "FALCON_CLIENT": {
+          "STACK-SELECT-PACKAGE": "falcon-client",
+          "INSTALL": [
+            "falcon-client"
+          ],
+          "PATCH": [
+            "falcon-client"
+          ],
+          "STANDARD": [
+            "falcon-client"
+          ]
+        },
+        "FALCON_SERVER": {
+          "STACK-SELECT-PACKAGE": "falcon-server",
+          "INSTALL": [
+            "falcon-server"
+          ],
+          "PATCH": [
+            "falcon-server"
+          ],
+          "STANDARD": [
+            "falcon-server"
+          ]
+        }
+      },
+      "FLUME": {
+        "FLUME_HANDLER": {
+          "STACK-SELECT-PACKAGE": "flume-server",
+          "INSTALL": [
+            "flume-server"
+          ],
+          "PATCH": [
+            "flume-server"
+          ],
+          "STANDARD": [
+            "flume-server"
+          ]
+        }
+      },
+      "HBASE": {
+        "HBASE_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hbase-client",
+          "INSTALL": [
+            "hbase-client"
+          ],
+          "PATCH": [
+            "hbase-client"
+          ],
+          "STANDARD": [
+            "hbase-client",
+            "phoenix-client",
+            "hadoop-client"
+          ]
+        },
+        "HBASE_MASTER": {
+          "STACK-SELECT-PACKAGE": "hbase-master",
+          "INSTALL": [
+            "hbase-master"
+          ],
+          "PATCH": [
+            "hbase-master"
+          ],
+          "STANDARD": [
+            "hbase-master"
+          ]
+        },
+        "HBASE_REGIONSERVER": {
+          "STACK-SELECT-PACKAGE": "hbase-regionserver",
+          "INSTALL": [
+            "hbase-regionserver"
+          ],
+          "PATCH": [
+            "hbase-regionserver"
+          ],
+          "STANDARD": [
+            "hbase-regionserver"
+          ]
+        },
+        "PHOENIX_QUERY_SERVER": {
+          "STACK-SELECT-PACKAGE": "phoenix-server",
+          "INSTALL": [
+            "phoenix-server"
+          ],
+          "PATCH": [
+            "phoenix-server"
+          ],
+          "STANDARD": [
+            "phoenix-server"
+          ]
+        }
+      },
+      "HDFS": {
+        "DATANODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-datanode",
+          "INSTALL": [
+            "hadoop-hdfs-datanode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-datanode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-datanode"
+          ]
+        },
+        "HDFS_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        },
+        "NAMENODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-namenode",
+          "INSTALL": [
+            "hadoop-hdfs-namenode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-namenode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-namenode"
+          ]
+        },
+        "NFS_GATEWAY": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-nfs3",
+          "INSTALL": [
+            "hadoop-hdfs-nfs3"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-nfs3"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-nfs3"
+          ]
+        },
+        "JOURNALNODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-journalnode",
+          "INSTALL": [
+            "hadoop-hdfs-journalnode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-journalnode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-journalnode"
+          ]
+        },
+        "SECONDARY_NAMENODE": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-secondarynamenode",
+          "INSTALL": [
+            "hadoop-hdfs-secondarynamenode"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-secondarynamenode"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-secondarynamenode"
+          ]
+        },
+        "ZKFC": {
+          "STACK-SELECT-PACKAGE": "hadoop-hdfs-zkfc",
+          "INSTALL": [
+            "hadoop-hdfs-zkfc"
+          ],
+          "PATCH": [
+            "hadoop-hdfs-zkfc"
+          ],
+          "STANDARD": [
+            "hadoop-hdfs-zkfc"
+          ]
+        }
+      },
+      "HIVE": {
+        "HCAT": {
+          "STACK-SELECT-PACKAGE": "hive-webhcat",
+          "INSTALL": [
+            "hive-webhcat"
+          ],
+          "PATCH": [
+            "hive-webhcat"
+          ],
+          "STANDARD": [
+            "hive-webhcat"
+          ]
+        },
+        "HIVE_METASTORE": {
+          "STACK-SELECT-PACKAGE": "hive-metastore",
+          "INSTALL": [
+            "hive-metastore"
+          ],
+          "PATCH": [
+            "hive-metastore"
+          ],
+          "STANDARD": [
+            "hive-metastore"
+          ]
+        },
+        "HIVE_SERVER": {
+          "STACK-SELECT-PACKAGE": "hive-server2",
+          "INSTALL": [
+            "hive-server2"
+          ],
+          "PATCH": [
+            "hive-server2"
+          ],
+          "STANDARD": [
+            "hive-server2"
+          ]
+        },
+        "HIVE_SERVER_INTERACTIVE": {
+          "STACK-SELECT-PACKAGE": "hive-server2-hive2",
+          "INSTALL": [
+            "hive-server2-hive2"
+          ],
+          "PATCH": [
+            "hive-server2-hive2"
+          ],
+          "STANDARD": [
+            "hive-server2-hive2"
+          ]
+        },
+        "HIVE_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        },
+        "WEBHCAT_SERVER": {
+          "STACK-SELECT-PACKAGE": "hive-webhcat",
+          "INSTALL": [
+            "hive-webhcat"
+          ],
+          "PATCH": [
+            "hive-webhcat"
+          ],
+          "STANDARD": [
+            "hive-webhcat"
+          ]
+        }
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": {
+          "STACK-SELECT-PACKAGE": "kafka-broker",
+          "INSTALL": [
+            "kafka-broker"
+          ],
+          "PATCH": [
+            "kafka-broker"
+          ],
+          "STANDARD": [
+            "kafka-broker"
+          ]
+        }
+      },
+      "KNOX": {
+        "KNOX_GATEWAY": {
+          "STACK-SELECT-PACKAGE": "knox-server",
+          "INSTALL": [
+            "knox-server"
+          ],
+          "PATCH": [
+            "knox-server"
+          ],
+          "STANDARD": [
+            "knox-server"
+          ]
+        }
+      },
+      "MAHOUT": {
+        "MAHOUT": {
+          "STACK-SELECT-PACKAGE": "mahout-client",
+          "INSTALL": [
+            "mahout-client"
+          ],
+          "PATCH": [
+            "mahout-client"
+          ],
+          "STANDARD": [
+            "mahout-client"
+          ]
+        }
+      },
+      "MAPREDUCE2": {
+        "HISTORYSERVER": {
+          "STACK-SELECT-PACKAGE": "hadoop-mapreduce-historyserver",
+          "INSTALL": [
+            "hadoop-mapreduce-historyserver"
+          ],
+          "PATCH": [
+            "hadoop-mapreduce-historyserver"
+          ],
+          "STANDARD": [
+            "hadoop-mapreduce-historyserver"
+          ]
+        },
+        "MAPREDUCE2_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "hadoop-mapreduce-INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "OOZIE": {
+        "OOZIE_CLIENT": {
+          "STACK-SELECT-PACKAGE": "oozie-client",
+          "INSTALL": [
+            "oozie-client"
+          ],
+          "PATCH": [
+            "oozie-client"
+          ],
+          "STANDARD": [
+            "oozie-client"
+          ]
+        },
+        "OOZIE_SERVER": {
+          "STACK-SELECT-PACKAGE": "oozie-server",
+          "INSTALL": [
+            "oozie-client",
+            "oozie-server"
+          ],
+          "PATCH": [
+            "oozie-server"
+          ],
+          "STANDARD": [
+            "oozie-client",
+            "oozie-server"
+          ]
+        }
+      },
+      "PIG": {
+        "PIG": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "RANGER": {
+        "RANGER_ADMIN": {
+          "STACK-SELECT-PACKAGE": "ranger-admin",
+          "INSTALL": [
+            "ranger-admin"
+          ],
+          "PATCH": [
+            "ranger-admin"
+          ],
+          "STANDARD": [
+            "ranger-admin"
+          ]
+        },
+        "RANGER_TAGSYNC": {
+          "STACK-SELECT-PACKAGE": "ranger-tagsync",
+          "INSTALL": [
+            "ranger-tagsync"
+          ],
+          "PATCH": [
+            "ranger-tagsync"
+          ],
+          "STANDARD": [
+            "ranger-tagsync"
+          ]
+        },
+        "RANGER_USERSYNC": {
+          "STACK-SELECT-PACKAGE": "ranger-usersync",
+          "INSTALL": [
+            "ranger-usersync"
+          ],
+          "PATCH": [
+            "ranger-usersync"
+          ],
+          "STANDARD": [
+            "ranger-usersync"
+          ]
+        }
+      },
+      "RANGER_KMS": {
+        "RANGER_KMS_SERVER": {
+          "STACK-SELECT-PACKAGE": "ranger-kms",
+          "INSTALL": [
+            "ranger-kms"
+          ],
+          "PATCH": [
+            "ranger-kms"
+          ],
+          "STANDARD": [
+            "ranger-kms"
+          ]
+        }
+      },
+      "SLIDER": {
+        "SLIDER": {
+          "STACK-SELECT-PACKAGE": "slider-client",
+          "INSTALL": [
+            "slider-client"
+          ],
+          "PATCH": [
+            "slider-client"
+          ],
+          "STANDARD": [
+            "slider-client",
+            "hadoop-client"
+          ]
+        }
+      },
+      "SPARK": {
+        "LIVY_SERVER": {
+          "STACK-SELECT-PACKAGE": "livy2-server",
+          "INSTALL": [
+            "livy2-server"
+          ],
+          "PATCH": [
+            "livy2-server"
+          ],
+          "STANDARD": [
+            "livy2-server"
+          ]
+        },
+        "SPARK_CLIENT": {
+          "STACK-SELECT-PACKAGE": "spark2-client",
+          "INSTALL": [
+            "spark2-client"
+          ],
+          "PATCH": [
+            "spark2-client"
+          ],
+          "STANDARD": [
+            "spark2-client"
+          ]
+        },
+        "SPARK_JOBHISTORYSERVER": {
+          "STACK-SELECT-PACKAGE": "spark2-historyserver",
+          "INSTALL": [
+            "spark2-historyserver"
+          ],
+          "PATCH": [
+            "spark2-historyserver"
+          ],
+          "STANDARD": [
+            "spark2-historyserver"
+          ]
+        },
+        "SPARK_THRIFTSERVER": {
+          "STACK-SELECT-PACKAGE": "spark2-thriftserver",
+          "INSTALL": [
+            "spark2-thriftserver"
+          ],
+          "PATCH": [
+            "spark2-thriftserver"
+          ],
+          "STANDARD": [
+            "spark2-thriftserver"
+          ]
+        }
+      },
+      "SQOOP": {
+        "SQOOP": {
+          "STACK-SELECT-PACKAGE": "sqoop-client",
+          "INSTALL": [
+            "sqoop-client"
+          ],
+          "PATCH": [
+            "sqoop-client"
+          ],
+          "STANDARD": [
+            "sqoop-client"
+          ]
+        }
+      },
+      "STORM": {
+        "NIMBUS": {
+          "STACK-SELECT-PACKAGE": "storm-nimbus",
+          "INSTALL": [
+            "storm-client",
+            "storm-nimbus"
+          ],
+          "PATCH": [
+            "storm-client",
+            "storm-nimbus"
+          ],
+          "STANDARD": [
+            "storm-client",
+            "storm-nimbus"
+          ]
+        },
+        "SUPERVISOR": {
+          "STACK-SELECT-PACKAGE": "storm-supervisor",
+          "INSTALL": [
+            "storm-supervisor"
+          ],
+          "PATCH": [
+            "storm-supervisor"
+          ],
+          "STANDARD": [
+            "storm-client",
+            "storm-supervisor"
+          ]
+        },
+        "DRPC_SERVER": {
+          "STACK-SELECT-PACKAGE": "storm-client",
+          "INSTALL": [
+            "storm-client"
+          ],
+          "PATCH": [
+            "storm-client"
+          ],
+          "STANDARD": [
+            "storm-client"
+          ]
+        },
+        "STORM_UI_SERVER": {
+          "STACK-SELECT-PACKAGE": "storm-client",
+          "INSTALL": [
+            "storm-client"
+          ],
+          "PATCH": [
+            "storm-client"
+          ],
+          "STANDARD": [
+            "storm-client"
+          ]
+        }
+      },
+      "TEZ": {
+        "TEZ_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "YARN": {
+        "APP_TIMELINE_SERVER": {
+          "STACK-SELECT-PACKAGE": "hadoop-yarn-timelineserver",
+          "INSTALL": [
+            "hadoop-yarn-timelineserver"
+          ],
+          "PATCH": [
+            "hadoop-yarn-timelineserver"
+          ],
+          "STANDARD": [
+            "hadoop-yarn-timelineserver"
+          ]
+        },
+        "NODEMANAGER": {
+          "STACK-SELECT-PACKAGE": "hadoop-yarn-nodemanager",
+          "INSTALL": [
+            "hadoop-yarn-nodemanager"
+          ],
+          "PATCH": [
+            "hadoop-yarn-nodemanager"
+          ],
+          "STANDARD": [
+            "hadoop-yarn-nodemanager"
+          ]
+        },
+        "RESOURCEMANAGER": {
+          "STACK-SELECT-PACKAGE": "hadoop-yarn-resourcemanager",
+          "INSTALL": [
+            "hadoop-yarn-resourcemanager"
+          ],
+          "PATCH": [
+            "hadoop-yarn-resourcemanager"
+          ],
+          "STANDARD": [
+            "hadoop-yarn-resourcemanager"
+          ]
+        },
+        "YARN_CLIENT": {
+          "STACK-SELECT-PACKAGE": "hadoop-client",
+          "INSTALL": [
+            "hadoop-client"
+          ],
+          "PATCH": [
+            "INVALID"
+          ],
+          "STANDARD": [
+            "hadoop-client"
+          ]
+        }
+      },
+      "ZEPPELIN": {
+        "ZEPPELIN_MASTER": {
+          "STACK-SELECT-PACKAGE": "zeppelin-server",
+          "INSTALL": [
+            "zeppelin-server"
+          ],
+          "PATCH": [
+            "zeppelin-server"
+          ],
+          "STANDARD": [
+            "zeppelin-server"
+          ]
+        }
+      },
+      "ZOOKEEPER": {
+        "ZOOKEEPER_CLIENT": {
+          "STACK-SELECT-PACKAGE": "zookeeper-client",
+          "INSTALL": [
+            "zookeeper-client"
+          ],
+          "PATCH": [
+            "zookeeper-client"
+          ],
+          "STANDARD": [
+            "zookeeper-client"
+          ]
+        },
+        "ZOOKEEPER_SERVER": {
+          "STACK-SELECT-PACKAGE": "zookeeper-server",
+          "INSTALL": [
+            "zookeeper-server"
+          ],
+          "PATCH": [
+            "zookeeper-server"
+          ],
+          "STANDARD": [
+            "zookeeper-server"
+          ]
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
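
Given the mapping above, resolving the stack-select packages for a command is a dictionary walk keyed by stack name, service, role, and scope. A minimal sketch (the function signature is illustrative; the real lookup lives in stack_select.get_packages):

  import json

  PACKAGE_SCOPE_INSTALL = "INSTALL"  # the other scopes are "PATCH" and "STANDARD"

  def get_packages(mapping_json_text, stack, service, role, scope=PACKAGE_SCOPE_INSTALL):
    data = json.loads(mapping_json_text)
    entry = data[stack]["stack-select"][service][role]
    # Entries containing "INVALID" mark roles that must not be
    # stack-selected in that scope (e.g. hadoop-client during PATCH upgrades).
    return [p for p in entry[scope] if "INVALID" not in p]

For example, get_packages(mapping, "HDP", "HDFS", "DATANODE") yields ["hadoop-hdfs-datanode"], while the PATCH scope for HDFS_CLIENT filters down to an empty list.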

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
index b867561..2c92e4c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
@@ -27,7 +27,9 @@ import os
 class TestFlumeHandler(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "FLUME/1.4.0.2.0/package"
   STACK_VERSION = "2.0.6"
-  
+
+  CONFIG_OVERRIDES = {"serviceName":"FLUME", "role":"FLUME_HANDLER"}
+
   def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
@@ -560,6 +562,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "pre_upgrade_restart",
                        config_file="flume_22.json",
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
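
The new CONFIG_OVERRIDES dictionaries exist because the package mapping above is keyed by serviceName and role, which the canned command JSON files in these tests predate. Conceptually, the harness merges the overrides into the loaded command before running the script; a rough sketch of the idea (RMFTestCase internals simplified):

  import json

  with open("flume_22.json") as fp:
    command = json.load(fp)
  command.update({"serviceName": "FLUME", "role": "FLUME_HANDLER"})

The same pattern repeats in the HBASE and HDFS test changes below.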
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index cffec06..7c5c7f5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -29,6 +29,8 @@ class TestHBaseClient(RMFTestCase):
   STACK_VERSION = "2.0.6"
   TMP_PATH = '/hadoop'
 
+  CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"HBASE_CLIENT"}
+
   def test_configure_secured(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_client.py",
                    classname = "HbaseClient",
@@ -239,6 +241,7 @@ class TestHBaseClient(RMFTestCase):
                        classname = "HbaseClient",
                        command = "restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                       call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
@@ -258,7 +261,7 @@ class TestHBaseClient(RMFTestCase):
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
-       mocks_dict['checked_call'].call_args_list[4][0][0])
+       mocks_dict['checked_call'].call_args_list[5][0][0])
     self.assertEquals(
      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 05fa204..42289e1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -29,6 +29,8 @@ class TestHBaseMaster(RMFTestCase):
   TMP_PATH = "/hadoop"
  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
+  CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"HBASE_MASTER"}
+
   def test_install_hbase_master_default_no_phx(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                        classname = "HbaseMaster",
@@ -747,6 +749,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        mocks_dict = mocks_dict)
@@ -770,6 +773,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                       call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 93f5d19..6a2d8fb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -30,6 +30,8 @@ class TestHbaseRegionServer(RMFTestCase):
   STACK_VERSION = "2.0.6"
   TMP_PATH = '/hadoop'
 
+  CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"HBASE_REGIONSERVER"}
+
   def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
@@ -540,6 +542,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        classname = "HbaseRegionServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
@@ -580,6 +583,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        classname = "HbaseRegionServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                       call_mocks = [(0, None, ''), (0, None), (0, None), (0, None)],

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 1b324d4..973e274 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -32,6 +32,8 @@ class TestPhoenixQueryServer(RMFTestCase):
   STACK_VERSION = "2.3"
   TMP_PATH = "/hadoop"
 
+  CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"PHOENIX_QUERY_SERVER"}
+
   def test_configure_default(self):
     self.executeScript(
       self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
@@ -432,6 +434,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "pre_upgrade_restart",
       config_dict = json_content,
+      config_overrides = self.CONFIG_OVERRIDES,
       call_mocks = [(0, "/etc/hbase/2.3.0.0-1234/0", ''), (0, None, None), (0, 
None, None)],
       stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2bab2159/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index d2968f8..0f31ad2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -32,6 +32,8 @@ class TestDatanode(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
 
+  CONFIG_OVERRIDES = {"serviceName":"HDFS", "role":"DATANODE"}
+
   def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
@@ -484,6 +486,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
@@ -504,6 +507,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
+                       config_overrides = self.CONFIG_OVERRIDES,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
