Repository: ambari
Updated Branches:
  refs/heads/trunk 62e2183db -> d2cad200f


AMBARI-15557 YARN service check fails if there is no queue named "default" 
(dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d2cad200
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d2cad200
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d2cad200

Branch: refs/heads/trunk
Commit: d2cad200f107947cfc77e550e8acd81c3418f438
Parents: 62e2183
Author: Dmytro Sen <d...@apache.org>
Authored: Tue Jun 7 14:17:48 2016 +0300
Committer: Dmytro Sen <d...@apache.org>
Committed: Tue Jun 7 14:17:48 2016 +0300

----------------------------------------------------------------------
 .../YARN/2.1.0.2.0/configuration/yarn-env.xml   |  9 +++
 .../2.1.0.2.0/package/scripts/params_linux.py   |  1 +
 .../2.1.0.2.0/package/scripts/service_check.py  |  3 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 82 +++++++++++++++++++-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  5 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    | 63 ---------------
 .../stacks/2.0.6/common/test_stack_advisor.py   | 62 +++++++++++++++
 .../stacks/2.5/common/test_stack_advisor.py     | 75 ++++++++++++++++++
 8 files changed, 234 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
index 648c32e..9aa852b 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
@@ -252,4 +252,13 @@ YARN_OPTS="$YARN_OPTS 
-Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
+  <property>
+    <name>service_check.queue.name</name>
+    <value>default</value>
+    <description>
+      The queue that is used by the service check.
+    </description>
+    <on-ambari-upgrade add="true" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index a63abd5..4d281a8 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -173,6 +173,7 @@ mapred_log_dir_prefix = 
config['configurations']['mapred-env']['mapred_log_dir_p
 mapred_env_sh_template = config['configurations']['mapred-env']['content']
 yarn_env_sh_template = config['configurations']['yarn-env']['content']
 yarn_nodemanager_recovery_dir = 
default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
+service_check_queue_name = 
config['configurations']['yarn-env']['service_check.queue.name']
 
 if len(rm_hosts) > 1:
   additional_rm_host = rm_hosts[1]

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
index 6b7e0c3..131cd42 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
@@ -104,7 +104,8 @@ class ServiceCheckDefault(ServiceCheck):
 
     yarn_distrubuted_shell_check_params = ["yarn 
org.apache.hadoop.yarn.applications.distributedshell.Client",
                                            "-shell_command", "ls", 
"-num_containers", "{number_of_nm}",
-                                           "-jar", 
"{path_to_distributed_shell_jar}", "-timeout", "300000"]
+                                           "-jar", 
"{path_to_distributed_shell_jar}", "-timeout", "300000",
+                                           "--queue", 
"{service_check_queue_name}"]
     yarn_distrubuted_shell_check_cmd = format(" 
".join(yarn_distrubuted_shell_check_params))
 
     if params.security_enabled:

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index aa62d1e..b7c19ec 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -22,11 +22,17 @@ import os
 import sys
 from math import ceil, floor
 
+from resource_management.core.logger import Logger
+
 from stack_advisor import DefaultStackAdvisor
 
 
 class HDP206StackAdvisor(DefaultStackAdvisor):
 
+  def __init__(self):
+    super(HDP206StackAdvisor, self).__init__()
+    Logger.initialize_logger()
+
   def getComponentLayoutValidations(self, services, hosts):
     """Returns array of Validation objects about issues with hostnames 
components assigned to"""
     items = super(HDP206StackAdvisor, 
self).getComponentLayoutValidations(services, hosts)
@@ -899,7 +905,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HDFS": { "hdfs-site": self.validateHDFSConfigurations,
                 "hadoop-env": self.validateHDFSConfigurationsEnv},
       "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
-      "YARN": {"yarn-site": self.validateYARNConfigurations},
+      "YARN": {"yarn-site": self.validateYARNConfigurations,
+               "yarn-env": self.validateYARNEnvConfigurations},
       "HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
       "STORM": {"storm-site": self.validateStormConfigurations},
       "AMBARI_METRICS": {"ams-hbase-site": 
self.validateAmsHbaseSiteConfigurations,
@@ -1383,6 +1390,16 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
                         {"config-name": 
'yarn.scheduler.maximum-allocation-mb', "item": 
self.validatorLessThenDefaultValue(properties, recommendedDefaults, 
'yarn.scheduler.maximum-allocation-mb')} ]
     return self.toConfigurationValidationProblems(validationItems, "yarn-site")
 
+  def validateYARNEnvConfigurations(self, properties, recommendedDefaults, 
configurations, services, hosts):
+    validationItems = [ ]
+    yarnEnvProperties = getSiteProperties(configurations, "yarn-env")
+    capacity_scheduler_properties, received_as_key_value_pair = 
self.getCapacitySchedulerProperties(services)
+    leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
+    service_checkQueueName=yarnEnvProperties.get("service_check.queue.name")
+    if service_checkQueueName not in leafQueueNames:
+      validationItems.append({"config-name": 'service_check.queue.name', 
"item": self.getErrorItem("service_check.queue.name is not exist, or not 
corresponds to existing leaf queue")})
+    return self.toConfigurationValidationProblems(validationItems, "yarn-env")
+
   def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, 
configurations, services, hosts):
     hbase_site = getSiteProperties(configurations, "hbase-site")
     validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": 
self.validatorLessThenDefaultValue(properties, recommendedDefaults, 
'hbase_regionserver_heapsize')},
@@ -1486,6 +1503,69 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
         return False
     return True
 
+  """
+  Returns the dictionary of configs for 'capacity-scheduler'.
+  """
+  def getCapacitySchedulerProperties(self, services):
+    capacity_scheduler_properties = dict()
+    received_as_key_value_pair = True
+    if "capacity-scheduler" in services['configurations']:
+      if "capacity-scheduler" in 
services['configurations']["capacity-scheduler"]["properties"]:
+        cap_sched_props_as_str = 
services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]
+        if cap_sched_props_as_str:
+          cap_sched_props_as_str = str(cap_sched_props_as_str).split('\n')
+          if len(cap_sched_props_as_str) > 0 and cap_sched_props_as_str[0] != 
'null':
+            # Received configs as one "\n" separated string
+            for property in cap_sched_props_as_str:
+              key, sep, value = property.partition("=")
+              capacity_scheduler_properties[key] = value
+            Logger.info("'capacity-scheduler' configs is passed-in as a single 
'\\n' separated string. "
+                        
"count(services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'])
 = "
+                        "{0}".format(len(capacity_scheduler_properties)))
+            received_as_key_value_pair = False
+          else:
+            Logger.info("Passed-in 
services['configurations']['capacity-scheduler']['properties']['capacity-scheduler']
 is 'null'.")
+        else:
+          Logger.info("'capacity-scheduler' configs not passed-in as single 
'\\n' string in "
+                      
"services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'].")
+      if not capacity_scheduler_properties:
+        # Received configs as a dictionary (Generally on 1st invocation).
+        capacity_scheduler_properties = 
services['configurations']["capacity-scheduler"]["properties"]
+        Logger.info("'capacity-scheduler' configs is passed-in as a 
dictionary. "
+                    
"count(services['configurations']['capacity-scheduler']['properties']) = 
{0}".format(len(capacity_scheduler_properties)))
+    else:
+      Logger.error("Couldn't retrieve 'capacity-scheduler' from services.")
+
+    Logger.info("Retrieved 'capacity-scheduler' received as dictionary : 
'{0}'. configs : {1}" \
+                .format(received_as_key_value_pair, 
capacity_scheduler_properties.items()))
+    return capacity_scheduler_properties, received_as_key_value_pair
+
+  """
+  Gets all YARN leaf queues.
+  """
+  def getAllYarnLeafQueues(self, capacitySchedulerProperties):
+    config_list = capacitySchedulerProperties.keys()
+    yarn_queues = []
+    leafQueueNames = set()
+    if 'yarn.scheduler.capacity.root.queues' in config_list:
+      yarn_queues = 
capacitySchedulerProperties.get('yarn.scheduler.capacity.root.queues')
+
+    if yarn_queues:
+      toProcessQueues = yarn_queues.split(",")
+      while len(toProcessQueues) > 0:
+        queue = toProcessQueues.pop()
+        queueKey = "yarn.scheduler.capacity.root." + queue + ".queues"
+        if queueKey in capacitySchedulerProperties:
+          # If parent queue, add children
+          subQueues = capacitySchedulerProperties[queueKey].split(",")
+          for subQueue in subQueues:
+            toProcessQueues.append(queue + "." + subQueue)
+        else:
+          # Leaf queue
+          queueName = queue.split(".")[-1]
+          leafQueueNames.add(queueName)
+    return leafQueueNames
+
 def getOldValue(self, services, configType, propertyName):
   if services:
     if 'changed-configurations' in services.keys():

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 03e8150..22d29e5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -1501,6 +1501,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     return self.toConfigurationValidationProblems(validationItems, 
"ranger-storm-plugin-properties")
 
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, 
configurations, services, hosts):
+    parentValidationProblems = super(HDP22StackAdvisor, 
self).validateYARNEnvConfigurations(properties, recommendedDefaults, 
configurations, services, hosts)
     validationItems = []
     if "yarn_cgroups_enabled" in properties:
       yarn_cgroups_enabled = properties["yarn_cgroups_enabled"].lower() == 
"true"
@@ -1511,7 +1512,9 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       if not security_enabled and yarn_cgroups_enabled:
         validationItems.append({"config-name": "yarn_cgroups_enabled",
                               "item": self.getWarnItem("CPU Isolation should 
only be enabled if security is enabled")})
-    return self.toConfigurationValidationProblems(validationItems, "yarn-env")
+    validationProblems = 
self.toConfigurationValidationProblems(validationItems, "yarn-env")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
 
   def validateYARNRangerPluginConfigurations(self, properties, 
recommendedDefaults, configurations, services, hosts):
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 6ecebc6..4b1926a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1095,69 +1095,6 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       Logger.error("Problem retrieving YARN queues. Skipping updating HIVE 
Server Interactve "
                    "'hive.server2.tez.default.queues' property attributes.")
 
-  """
-  Gets all YARN leaf queues.
-  """
-  def getAllYarnLeafQueues(self, capacitySchedulerProperties):
-    config_list = capacitySchedulerProperties.keys()
-    yarn_queues = []
-    leafQueueNames = set()
-    if 'yarn.scheduler.capacity.root.queues' in config_list:
-      yarn_queues = 
capacitySchedulerProperties.get('yarn.scheduler.capacity.root.queues')
-
-    if yarn_queues:
-      toProcessQueues = yarn_queues.split(",")
-      while len(toProcessQueues) > 0:
-        queue = toProcessQueues.pop()
-        queueKey = "yarn.scheduler.capacity.root." + queue + ".queues"
-        if queueKey in capacitySchedulerProperties:
-          # If parent queue, add children
-          subQueues = capacitySchedulerProperties[queueKey].split(",")
-          for subQueue in subQueues:
-            toProcessQueues.append(queue + "." + subQueue)
-        else:
-          # Leaf queue
-          queueName = queue.split(".")[-1]
-          leafQueueNames.add(queueName)
-    return leafQueueNames
-
-  """
-  Returns the dictionary of configs for 'capacity-scheduler'.
-  """
-  def getCapacitySchedulerProperties(self, services):
-    capacity_scheduler_properties = dict()
-    received_as_key_value_pair = True
-    if "capacity-scheduler" in services['configurations']:
-      if "capacity-scheduler" in 
services['configurations']["capacity-scheduler"]["properties"]:
-        cap_sched_props_as_str = 
services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]
-        if cap_sched_props_as_str:
-          cap_sched_props_as_str = str(cap_sched_props_as_str).split('\n')
-          if len(cap_sched_props_as_str) > 0 and cap_sched_props_as_str[0] != 
'null':
-            # Received confgs as one "\n" separated string
-            for property in cap_sched_props_as_str:
-              key, sep, value = property.partition("=")
-              capacity_scheduler_properties[key] = value
-            Logger.info("'capacity-scheduler' configs is passed-in as a single 
'\\n' separated string. "
-                        
"count(services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'])
 = "
-                        "{0}".format(len(capacity_scheduler_properties)))
-            received_as_key_value_pair = False
-          else:
-            Logger.info("Passed-in 
services['configurations']['capacity-scheduler']['properties']['capacity-scheduler']
 is 'null'.")
-        else:
-          Logger.info("'capacity-schdeuler' configs not passed-in as single 
'\\n' string in "
-                      
"services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'].")
-      if not capacity_scheduler_properties:
-        # Received configs as a dictionary (Generally on 1st invocation).
-        capacity_scheduler_properties = 
services['configurations']["capacity-scheduler"]["properties"]
-        Logger.info("'capacity-scheduler' configs is passed-in as a 
dictionary. "
-                    
"count(services['configurations']['capacity-scheduler']['properties']) = 
{0}".format(len(capacity_scheduler_properties)))
-    else:
-      Logger.error("Couldn't retrieve 'capacity-scheduler' from services.")
-
-    Logger.info("Retrieved 'capacity-scheduler' received as dictionary : 
'{0}'. configs : {1}"\
-                .format(received_as_key_value_pair, 
capacity_scheduler_properties.items()))
-    return capacity_scheduler_properties, received_as_key_value_pair
-
   def recommendRangerKMSConfigurations(self, configurations, clusterData, 
services, hosts):
     super(HDP25StackAdvisor, 
self).recommendRangerKMSConfigurations(configurations, clusterData, services, 
hosts)
     servicesList = [service["StackServices"]["service_name"] for service in 
services["services"]]

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py 
b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 3744c26..d0a18fd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -209,6 +209,68 @@ class TestHDP206StackAdvisor(TestCase):
     ]
     self.assertValidationResult(expectedItems, result)
 
+  def test_validationYARNServicecheckQueueName(self):
+    servicesInfo = [
+      {
+        "name": "YARN",
+        "components": []
+      }
+    ]
+    services = self.prepareServices(servicesInfo)
+    services["configurations"] = 
{"yarn-env":{"properties":{"service_check.queue.name": "default"}},
+                                  
"capacity-scheduler":{"properties":{"capacity-scheduler":
+                                                                        
"yarn.scheduler.capacity.ndfqueue.minimum-user-limit-percent=100\n" +
+                                                                        
"yarn.scheduler.capacity.maximum-am-resource-percent=0.2\n" +
+                                                                        
"yarn.scheduler.capacity.maximum-applications=10000\n" +
+                                                                        
"yarn.scheduler.capacity.node-locality-delay=40\n" +
+                                                                        
"yarn.scheduler.capacity.root.accessible-node-labels=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.acl_administer_queue=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.acl_administer_jobs=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.acl_submit_applications=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.maximum-capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.state=RUNNING\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.user-limit-factor=1\n" +
+                                                                        
"yarn.scheduler.capacity.root.queues=ndfqueue\n"}}}
+    hosts = self.prepareHosts([])
+    result = self.stackAdvisor.validateConfigurations(services, hosts)
+
+    expectedItems = [
+      {'message': 'service_check.queue.name is not exist, or not corresponds 
to existing leaf queue', 'level': 'ERROR'}
+    ]
+    self.assertValidationResult(expectedItems, result)
+
+  def test_validationYARNServicecheckQueueNameDefault(self):
+    servicesInfo = [
+      {
+        "name": "YARN",
+        "components": []
+      }
+    ]
+    services = self.prepareServices(servicesInfo)
+    services["configurations"] = 
{"yarn-env":{"properties":{"service_check.queue.name": "default"}},
+                                  
"capacity-scheduler":{"properties":{"capacity-scheduler":
+                                                                        
"yarn.scheduler.capacity.default.minimum-user-limit-percent=100\n" +
+                                                                        
"yarn.scheduler.capacity.maximum-am-resource-percent=0.2\n" +
+                                                                        
"yarn.scheduler.capacity.maximum-applications=10000\n" +
+                                                                        
"yarn.scheduler.capacity.node-locality-delay=40\n" +
+                                                                        
"yarn.scheduler.capacity.root.accessible-node-labels=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.acl_administer_queue=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.default.acl_administer_jobs=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.default.acl_submit_applications=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.default.capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.default.maximum-capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.default.state=RUNNING\n" +
+                                                                        
"yarn.scheduler.capacity.root.default.user-limit-factor=1\n" +
+                                                                        
"yarn.scheduler.capacity.root.queues=default\n"}}}
+    hosts = self.prepareHosts([])
+    result = self.stackAdvisor.validateConfigurations(services, hosts)
+
+    expectedItems = [ ]
+    self.assertValidationResult(expectedItems, result)
+
   def test_validationMinMax(self):
 
     configurations = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d2cad200/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py 
b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 1627458..74297de 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -6166,6 +6166,81 @@ class TestHDP25StackAdvisor(TestCase):
     
self.assertEquals(configurations['hbase-site']['properties']['hbase.master.ui.readonly'],
         
expected_configuration['hbase-site']['properties']['hbase.master.ui.readonly'])
 
+  def test_validationYARNServicecheckQueueName(self):
+    servicesInfo = [
+      {
+        "name": "YARN",
+        "components": []
+      }
+    ]
+    services = self.prepareServices(servicesInfo)
+    services["configurations"] = 
{"yarn-env":{"properties":{"service_check.queue.name": "default"}},
+                                  
"capacity-scheduler":{"properties":{"capacity-scheduler":
+                                                                        
"yarn.scheduler.capacity.ndfqueue.minimum-user-limit-percent=100\n" +
+                                                                        
"yarn.scheduler.capacity.maximum-am-resource-percent=0.2\n" +
+                                                                        
"yarn.scheduler.capacity.maximum-applications=10000\n" +
+                                                                        
"yarn.scheduler.capacity.node-locality-delay=40\n" +
+                                                                        
"yarn.scheduler.capacity.root.accessible-node-labels=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.acl_administer_queue=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.acl_administer_jobs=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.acl_submit_applications=*\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.maximum-capacity=100\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.state=RUNNING\n" +
+                                                                        
"yarn.scheduler.capacity.root.ndfqueue.user-limit-factor=1\n" +
+                                                                        
"yarn.scheduler.capacity.root.queues=ndfqueue\n"}}}
+    hosts = self.prepareHosts([])
+    result = self.stackAdvisor.validateConfigurations(services, hosts)
+    expectedItems = [
+      {'message': 'service_check.queue.name is not exist, or not corresponds 
to existing leaf queue', 'level': 'ERROR'}
+    ]
+    self.assertValidationResult(expectedItems, result)
+
+  def assertValidationResult(self, expectedItems, result):
+    actualItems = []
+    for item in result["items"]:
+      next = {"message": item["message"], "level": item["level"]}
+      try:
+        next["host"] = item["host"]
+      except KeyError, err:
+        pass
+      actualItems.append(next)
+    self.checkEqual(expectedItems, actualItems)
+
+  def checkEqual(self, l1, l2):
+    if not len(l1) == len(l2) or not sorted(l1) == sorted(l2):
+      raise AssertionError("list1={0}, list2={1}".format(l1, l2))
+
+  def prepareServices(self, servicesInfo):
+    services = { "Versions" : { "stack_name" : "HDP", "stack_version" : "2.5" 
} }
+    services["services"] = []
+
+    for serviceInfo in servicesInfo:
+      nextService = {"StackServices":{"service_name" : serviceInfo["name"]}}
+      nextService["components"] = []
+      for component in serviceInfo["components"]:
+        nextComponent = {
+          "StackServiceComponents": {
+            "component_name": component["name"],
+            "cardinality": component["cardinality"],
+            "component_category": component["category"],
+            "is_master": component["is_master"]
+          }
+        }
+        try:
+          nextComponent["StackServiceComponents"]["hostnames"] = 
component["hostnames"]
+        except KeyError:
+          nextComponent["StackServiceComponents"]["hostnames"] = []
+        try:
+          nextComponent["StackServiceComponents"]["display_name"] = 
component["display_name"]
+        except KeyError:
+          nextComponent["StackServiceComponents"]["display_name"] = 
component["name"]
+        nextService["components"].append(nextComponent)
+      services["services"].append(nextService)
+
+    return services
+
   def test_phoenixQueryServerNoChangesWithUnsecure(self):
     self.maxDiff = None
     phoenix_query_server_hosts = ["c6402.ambari.apache.org"]

Reply via email to