YARN-4857. Add missing default configuration regarding preemption of CapacityScheduler. Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0064cba1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0064cba1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0064cba1

Branch: refs/heads/HDFS-1312
Commit: 0064cba169d1bb761f6e81ee86830be598d7c500
Parents: f1b8f6b
Author: Varun Vasudev <vvasu...@apache.org>
Authored: Thu Mar 31 14:05:49 2016 +0530
Committer: Varun Vasudev <vvasu...@apache.org>
Committed: Thu Mar 31 14:05:49 2016 +0530

----------------------------------------------------------------------
 .../src/main/resources/yarn-default.xml         | 58 ++++++++++++++++++++
 1 file changed, 58 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0064cba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ea1afe4..33cd919 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -908,6 +908,64 @@
     <value>600000</value>
   </property>
 
+  <property>
+    <description>
+    If true, run the policy but do not affect the cluster with preemption and kill events.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.observe_only</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Time in milliseconds between invocations of this ProportionalCapacityPreemptionPolicy
+    policy.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval</name>
+    <value>3000</value>
+  </property>
+
+  <property>
+    <description>
+    Time in milliseconds between requesting a preemption from an application and killing
+    the container.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill</name>
+    <value>15000</value>
+  </property>
+
+  <property>
+    <description>
+    Maximum percentage of resources preempted in a single round. By controlling this value one
+    can throttle the pace at which containers are reclaimed from the cluster. After computing
+    the total desired preemption, the policy scales it back within this limit.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+    <value>0.1</value>
+  </property>
+
+  <property>
+    <description>
+    Maximum amount of resources above the target capacity ignored for preemption.
+    This defines a deadzone around the target capacity that helps prevent thrashing and
+    oscillations around the computed target balance. High values would slow the time to capacity
+    and (absent natural completions) it might prevent convergence to guaranteed capacity.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.max_ignored_over_capacity</name>
+    <value>0.1</value>
+  </property>
+
+  <property>
+    <description>
+    Given a computed preemption target, account for containers naturally expiring and preempt
+    only this percentage of the delta. This determines the rate of geometric convergence into
+    the deadzone (MAX_IGNORED_OVER_CAPACITY). For example, a termination factor of 0.5 will reclaim
+    almost 95% of resources within 5 * #WAIT_TIME_BEFORE_KILL, even absent natural termination.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+    <value>0.2</value>
+  </property>
+
   <!-- Node Manager Configuration -->
 
   <property>
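
A note to make the natural_termination_factor description above concrete. Reading the
factor f as the fraction of the remaining preemption delta reclaimed each round (the
symbols f and r_k are introduced here only for illustration, not taken from the commit),
the unreclaimed fraction after k rounds is

\[
  r_k = (1 - f)^k \qquad\Longrightarrow\qquad r_5 = (1 - 0.5)^5 = 0.03125
\]

so with the 0.5 factor of the example, roughly 97% of the computed delta is reclaimed
within five rounds, which is where the "almost 95% within 5 * #WAIT_TIME_BEFORE_KILL"
figure in the description comes from. Each round is additionally capped by
total_preemption_per_round, i.e. at most 10% of cluster resources per round with the
0.1 default above.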

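For readers wiring this up: the defaults above only take effect once the capacity
preemption monitor is actually enabled. A minimal, illustrative yarn-site.xml fragment
might look like the following; the monitor toggle and policy list are existing YARN
settings, while the two overridden values are arbitrary examples rather than
recommendations.

<configuration>
  <!-- Enable periodic scheduler monitors and run the capacity preemption policy. -->
  <property>
    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.monitor.policies</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy</value>
  </property>

  <!-- Dry-run first: compute preemption decisions without killing any containers. -->
  <property>
    <name>yarn.resourcemanager.monitor.capacity.preemption.observe_only</name>
    <value>true</value>
  </property>

  <!-- Example override of one of the new defaults: allow 30s between the preemption
       request and the kill. -->
  <property>
    <name>yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill</name>
    <value>30000</value>
  </property>
</configuration>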