[ https://issues.apache.org/jira/browse/YARN-9720?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16903167#comment-16903167 ]

ANANDA G B commented on YARN-9720:
----------------------------------

Hi Eric Payne, here is my capacity-scheduler.xml configuration:
<configuration>
 <property>
 <name>yarn.scheduler.capacity.maximum-applications</name>
 <value>10000</value>
 <description>
 Maximum number of applications that can be pending and running.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.resource-calculator</name>
 <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
 <description>
 The ResourceCalculator implementation to be used to compare 
 Resources in the scheduler.
 The default i.e. DefaultResourceCalculator only uses Memory while
 DominantResourceCalculator uses dominant-resource to compare 
 multi-dimensional resources such as Memory, CPU etc.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.root.queues</name>
 <value>default,root-default,queue1</value>
 <description>
 The queues at this level (root is the root queue).
 </description>
 </property>

<property>
<name>yarn.scheduler.capacity.root.accessible-node-labels</name>
<value>pool1</value>
</property> 
<property>
<name>yarn.scheduler.capacity.root.accessible-node-labels.pool1.capacity</name>
<value>100</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.maximum-am-resource-percent</name>
<value>1</value>
</property>

<property>
<name>yarn.scheduler.capacity.root.default.capacity</name>
<value>20</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
<value>100</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.default.state</name>
<value>RUNNING</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.default.maximum-am-resource-percent</name>
<value>0.1</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.default.accessible-node-labels</name>
<value> </value>
</property>


<property>
<name>yarn.scheduler.capacity.root.root-default.capacity</name>
<value>70.0</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.root-default.maximum-capacity</name>
<value>100</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.root-default.state</name>
<value>RUNNING</value>
</property>

<property>
<name>yarn.scheduler.capacity.root.root-default.maximum-am-resource-percent</name>
<value>0.1</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.root-default.accessible-node-labels</name>
<value>pool1</value>
</property> 
<property>
<name>yarn.scheduler.capacity.root.root-default.default-node-label-expression</name>
<value>pool1</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.root-default.accessible-node-labels.pool1.capacity</name>
<value>80.0</value>
</property> 
<property>
<name>yarn.scheduler.capacity.root.root-default.accessible-node-labels.pool1.maximum-capacity</name>
<value>100.0</value>
</property>

<property>
<name>yarn.scheduler.capacity.root.queue1.capacity</name>
<value>10.0</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.queue1.maximum-capacity</name>
<value>100</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.queue1.state</name>
<value>RUNNING</value>
</property>

<property>
<name>yarn.scheduler.capacity.root.queue1.maximum-am-resource-percent</name>
<value>0.8</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.queue1.accessible-node-labels</name>
<value>pool1</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.queue1.default-node-label-expression</name>
<value>pool1</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.queue1.accessible-node-labels.pool1.capacity</name>
<value>20.0</value>
</property>
<property>
<name>yarn.scheduler.capacity.root.queue1.accessible-node-labels.pool1.maximum-capacity</name>
<value>100.0</value>
</property>
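
<!-- Sanity check on the capacities above (my arithmetic summary, not a
     setting): for the default partition the queue capacities sum to
     20 (default) + 70 (root-default) + 10 (queue1) = 100, and for the
     pool1 partition they sum to 80 (root-default) + 20 (queue1) = 100. -->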

<property>
 <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
 <value>1</value>
 <description>
 The default queue user limit factor: a single user can consume at most
 this multiple of the queue's configured capacity.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
 <value>*</value>
 <description>
 The ACL of who can submit jobs to the default queue.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
 <value>*</value>
 <description>
 The ACL of who can administer jobs on the default queue.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.root.default.acl_application_max_priority</name>
 <value>*</value>
 <description>
 The ACL of who can submit applications with configured priority.
 
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.root.default.maximum-application-lifetime</name>
 <value>-1</value>
 <description>
 Maximum lifetime, in seconds, of an application submitted to this queue.
 Any value less than or equal to zero is considered disabled.
 This is a hard time limit for all applications in the queue: if a positive
 value is configured, any application submitted to the queue is killed once
 it exceeds the configured lifetime.
 Users can also specify a lifetime per application in the application
 submission context, but the user-specified lifetime is overridden if it
 exceeds the queue maximum lifetime. It is a point-in-time configuration.
 Note: configuring too low a value will result in applications being killed
 sooner. This feature is applicable only to leaf queues.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.root.default.default-application-lifetime</name>
 <value>-1</value>
 <description>
 Default lifetime, in seconds, of an application submitted to this queue.
 Any value less than or equal to zero is considered disabled.
 If the user does not submit the application with a lifetime value, this
 value is used. It is a point-in-time configuration.
 Note: the default lifetime can't exceed the maximum lifetime. This feature
 is applicable only to leaf queues.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.node-locality-delay</name>
 <value>40</value>
 <description>
 Number of missed scheduling opportunities after which the CapacityScheduler
 attempts to schedule rack-local containers.
 When setting this parameter, the size of the cluster should be taken into
 account.
 We use 40 as the default value, which is approximately the number of nodes
 in one rack.
 Note, if this value is -1, the locality constraint in the container request
 will be ignored, which disables the delay scheduling.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.rack-locality-additional-delay</name>
 <value>-1</value>
 <description>
 Number of additional missed scheduling opportunities over the
 node-locality-delay ones, after which the CapacityScheduler attempts to
 schedule off-switch containers, instead of rack-local ones.
 Example: with node-locality-delay=40 and rack-locality-delay=20, the
 scheduler will attempt rack-local assignments after 40 missed
 opportunities, and off-switch assignments after 40+20=60 missed
 opportunities.
 When setting this parameter, the size of the cluster should be taken into
 account.
 We use -1 as the default value, which disables this feature. In this case,
 the number of missed opportunities for assigning off-switch containers is
 calculated based on the number of containers and unique locations specified
 in the resource request, as well as the size of the cluster.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.queue-mappings</name>
 <value></value>
 <description>
 A list of mappings that will be used to assign jobs to queues.
 The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
 Typically this list will be used to map users to queues,
 for example, u:%user:%user maps all users to queues with the same name
 as the user.
 </description>
 </property>
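
<!-- Illustration only (hypothetical user name, not part of this cluster's
     config): a mapping value that routes user gb to queue1 and every other
     user to a queue of their own name would be
     u:gb:queue1,u:%user:%user -->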

<property>
 <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
 <value>false</value>
 <description>
 If a queue mapping is present, will it override the value specified
 by the user? This can be used by administrators to place jobs in queues
 that are different than the one specified by the user.
 The default is false.
 </description>
 </property>

<property>
 <name>yarn.scheduler.capacity.per-node-heartbeat.maximum-offswitch-assignments</name>
 <value>1</value>
 <description>
 Controls the number of OFF_SWITCH assignments allowed
 during a node's heartbeat. Increasing this value can improve
 scheduling rate for OFF_SWITCH containers. Lower values reduce
 "clumping" of applications on particular nodes. The default is 1.
 Legal values are 1-MAX_INT. This config is refreshable.
 </description>
 </property>


 <property>
 <name>yarn.scheduler.capacity.application.fail-fast</name>
 <value>false</value>
 <description>
 Whether RM should fail during recovery if previous applications'
 queue is no longer valid.
 </description>
 </property>

</configuration>
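
For reference, the pool1 partition itself is registered outside this file.
Assuming the label was added as non-exclusive via yarn rmadmin (the node
host name below is a placeholder), the usual commands are:

./yarn rmadmin -addToClusterNodeLabels "pool1(exclusive=false)"
./yarn rmadmin -replaceLabelsOnNode "node1=pool1"
./yarn cluster --list-node-labels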

> MR job submitted to a queue with default partition accessing the 
> non-exclusive label resources
> ----------------------------------------------------------------------------------------------
>
>                 Key: YARN-9720
>                 URL: https://issues.apache.org/jira/browse/YARN-9720
>             Project: Hadoop YARN
>          Issue Type: Bug
>          Components: capacityscheduler, resourcemanager
>    Affects Versions: 3.1.1, 3.1.2
>            Reporter: ANANDA G B
>            Assignee: ANANDA G B
>            Priority: Major
>         Attachments: Issue.png
>
>
> When an MR job is submitted to queue1 with the default partition, it is
> able to access the non-exclusive partition's resources. Please find the
> attachment.
> MR job command:
> ./yarn jar ../share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.0201.jar pi -Dmapreduce.job.queuename=queue1 -Dmapreduce.job.node-label-expression= 10 10
>  


