hadoop git commit: HDFS-11344. The default value of the setting dfs.disk.balancer.block.tolerance.percent is different. Contributed by Yiqun Lin.

2017-01-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1cde954a4 -> 2604e82eb


HDFS-11344. The default value of the setting dfs.disk.balancer.block.tolerance.percent is different. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2604e82e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2604e82e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2604e82e

Branch: refs/heads/trunk
Commit: 2604e82ebab6903a1471013767e3830e1532440a
Parents: 1cde954
Author: Anu Engineer 
Authored: Fri Jan 13 22:38:46 2017 -0800
Committer: Anu Engineer 
Committed: Fri Jan 13 22:38:46 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2604e82e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cf9c805..a00e872 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1034,7 +1034,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String DFS_DISK_BALANCER_BLOCK_TOLERANCE =
       "dfs.disk.balancer.block.tolerance.percent";
-  public static final int DFS_DISK_BALANCER_BLOCK_TOLERANCE_DEFAULT = 5;
+  public static final int DFS_DISK_BALANCER_BLOCK_TOLERANCE_DEFAULT = 10;
 
   public static final String DFS_DISK_BALANCER_PLAN_THRESHOLD =
       "dfs.disk.balancer.plan.threshold.percent";


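A minimal sketch of how the constant pair above is consumed through the standard Hadoop Configuration API; the wrapper class and printed label are illustrative, while the key and default constants are the ones shown in the diff:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ToleranceDefaultSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With no explicit value in hdfs-site.xml, getInt falls back to the
    // compiled-in default, which this commit changes from 5 to 10.
    int tolerance = conf.getInt(
        DFSConfigKeys.DFS_DISK_BALANCER_BLOCK_TOLERANCE,
        DFSConfigKeys.DFS_DISK_BALANCER_BLOCK_TOLERANCE_DEFAULT);
    System.out.println("block tolerance percent = " + tolerance);
  }
}
```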
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11306. Print remaining edit logs from buffer if edit log can't be rolled. Contributed by Wei-Chiu Chuang.

2017-01-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f166bb8f0 -> 914eeb997


HDFS-11306. Print remaining edit logs from buffer if edit log can't be rolled. Contributed by Wei-Chiu Chuang.

(cherry picked from commit 1cde954a4fe7760a09b680413ad763cbe4a8feb3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/914eeb99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/914eeb99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/914eeb99

Branch: refs/heads/branch-2
Commit: 914eeb997b798f526eb79b4ce475ba8ced2be55f
Parents: f166bb8
Author: Wei-Chiu Chuang 
Authored: Fri Jan 13 11:46:01 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Fri Jan 13 11:46:57 2017 -0800

--
 .../hdfs/server/namenode/EditsDoubleBuffer.java | 34 
 .../server/namenode/TestEditsDoubleBuffer.java  | 54 
 2 files changed, 88 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/914eeb99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
index 76a9eac..4e1dab0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
@@ -17,9 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.util.Arrays;
 
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
@@ -37,6 +43,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class EditsDoubleBuffer {
+  protected static final Log LOG = LogFactory.getLog(EditsDoubleBuffer.class);
 
   private TxnBuffer bufCurrent; // current buffer for writing
   private TxnBuffer bufReady; // buffer ready for flushing
@@ -63,6 +70,7 @@ public class EditsDoubleBuffer {
 
 int bufSize = bufCurrent.size();
 if (bufSize != 0) {
+  bufCurrent.dumpRemainingEditLogs();
   throw new IOException("FSEditStream has " + bufSize
   + " bytes still to be flushed and cannot be closed.");
 }
@@ -157,6 +165,32 @@ public class EditsDoubleBuffer {
   numTxns = 0;
   return this;
 }
+
+    private void dumpRemainingEditLogs() {
+      byte[] buf = this.getData();
+      byte[] remainingRawEdits = Arrays.copyOfRange(buf, 0, this.size());
+      ByteArrayInputStream bis = new ByteArrayInputStream(remainingRawEdits);
+      DataInputStream dis = new DataInputStream(bis);
+      FSEditLogLoader.PositionTrackingInputStream tracker =
+          new FSEditLogLoader.PositionTrackingInputStream(bis);
+      FSEditLogOp.Reader reader = FSEditLogOp.Reader.create(dis, tracker,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+      FSEditLogOp op;
+      LOG.warn("The edits buffer is " + size() + " bytes long with " + numTxns +
+          " unflushed transactions. " +
+          "Below is the list of unflushed transactions:");
+      int numTransactions = 0;
+      try {
+        while ((op = reader.readOp(false)) != null) {
+          LOG.warn("Unflushed op [" + numTransactions + "]: " + op);
+          numTransactions++;
+        }
+      } catch (IOException ioe) {
+        // If any exceptions, print raw bytes and stop.
+        LOG.warn("Unable to dump remaining ops. Remaining raw bytes: " +
+            Hex.encodeHexString(remainingRawEdits), ioe);
+      }
+    }
   }
 
 }
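As a rough sketch of the new behavior (class and message text come from the diff above; the driver itself is illustrative, not part of the patch): closing a buffer that still holds unflushed bytes now logs the pending transactions, or their raw hex if they cannot be parsed, before the existing IOException is thrown.

```java
import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer;

public class DumpOnCloseSketch {
  public static void main(String[] args) throws IOException {
    EditsDoubleBuffer buffer = new EditsDoubleBuffer(1024);
    byte[] raw = new byte[10];
    // Leave unflushed bytes sitting in bufCurrent.
    buffer.writeRaw(raw, 0, raw.length);
    try {
      // close() now calls dumpRemainingEditLogs() before throwing.
      buffer.close();
    } catch (IOException expected) {
      // "FSEditStream has 10 bytes still to be flushed and cannot be closed."
      // The raw zero bytes are not parseable ops, so the WARN output falls
      // back to the hex-dump path in the catch block of the patch.
    }
  }
}
```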

http://git-wip-us.apache.org/repos/asf/hadoop/blob/914eeb99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index 9feeada..b75309e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ 

hadoop git commit: HDFS-11306. Print remaining edit logs from buffer if edit log can't be rolled. Contributed by Wei-Chiu Chuang.

2017-01-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk d3170f9eb -> 1cde954a4


HDFS-11306. Print remaining edit logs from buffer if edit log can't be rolled. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cde954a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cde954a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cde954a

Branch: refs/heads/trunk
Commit: 1cde954a4fe7760a09b680413ad763cbe4a8feb3
Parents: d3170f9
Author: Wei-Chiu Chuang 
Authored: Fri Jan 13 11:46:01 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Fri Jan 13 11:46:30 2017 -0800

--
 .../hdfs/server/namenode/EditsDoubleBuffer.java | 34 
 .../server/namenode/TestEditsDoubleBuffer.java  | 54 
 2 files changed, 88 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cde954a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
index 76a9eac..4e1dab0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
@@ -17,9 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.util.Arrays;
 
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
@@ -37,6 +43,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class EditsDoubleBuffer {
+  protected static final Log LOG = LogFactory.getLog(EditsDoubleBuffer.class);
 
   private TxnBuffer bufCurrent; // current buffer for writing
   private TxnBuffer bufReady; // buffer ready for flushing
@@ -63,6 +70,7 @@ public class EditsDoubleBuffer {
 
 int bufSize = bufCurrent.size();
 if (bufSize != 0) {
+  bufCurrent.dumpRemainingEditLogs();
   throw new IOException("FSEditStream has " + bufSize
   + " bytes still to be flushed and cannot be closed.");
 }
@@ -157,6 +165,32 @@ public class EditsDoubleBuffer {
   numTxns = 0;
   return this;
 }
+
+    private void dumpRemainingEditLogs() {
+      byte[] buf = this.getData();
+      byte[] remainingRawEdits = Arrays.copyOfRange(buf, 0, this.size());
+      ByteArrayInputStream bis = new ByteArrayInputStream(remainingRawEdits);
+      DataInputStream dis = new DataInputStream(bis);
+      FSEditLogLoader.PositionTrackingInputStream tracker =
+          new FSEditLogLoader.PositionTrackingInputStream(bis);
+      FSEditLogOp.Reader reader = FSEditLogOp.Reader.create(dis, tracker,
+          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+      FSEditLogOp op;
+      LOG.warn("The edits buffer is " + size() + " bytes long with " + numTxns +
+          " unflushed transactions. " +
+          "Below is the list of unflushed transactions:");
+      int numTransactions = 0;
+      try {
+        while ((op = reader.readOp(false)) != null) {
+          LOG.warn("Unflushed op [" + numTransactions + "]: " + op);
+          numTransactions++;
+        }
+      } catch (IOException ioe) {
+        // If any exceptions, print raw bytes and stop.
+        LOG.warn("Unable to dump remaining ops. Remaining raw bytes: " +
+            Hex.encodeHexString(remainingRawEdits), ioe);
+      }
+    }
   }
 
 }
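The fallback in the catch block above leans on commons-codec; a tiny, self-contained illustration of that call (byte values arbitrary):

```java
import org.apache.commons.codec.binary.Hex;

public class HexDumpSketch {
  public static void main(String[] args) {
    byte[] remainingRawEdits = {0x0a, 0x1b, (byte) 0xff};
    // encodeHexString renders the unparseable edit bytes as lowercase hex.
    System.out.println(Hex.encodeHexString(remainingRawEdits)); // prints "0a1bff"
  }
}
```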

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cde954a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index 9feeada..b75309e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ 

[1/2] hadoop git commit: YARN-5980. Update documentation for single node hbase deploy. Contributed by Vrushali C.

2017-01-13 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 8df6f98e5 -> e1bdba778
  refs/heads/YARN-5355-branch-2 cf7f9e91f -> 8d1e41407


YARN-5980. Update documentation for single node hbase deploy. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1bdba77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1bdba77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1bdba77

Branch: refs/heads/YARN-5355
Commit: e1bdba77888723b435a235a96c8659029afd25d5
Parents: 8df6f98
Author: Sangjin Lee 
Authored: Fri Jan 13 09:12:48 2017 -0800
Committer: Sangjin Lee 
Committed: Fri Jan 13 09:12:48 2017 -0800

--
 .../src/site/markdown/TimelineServiceV2.md  | 63 +---
 1 file changed, 55 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1bdba77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 9a06b47..0d77f2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -165,18 +165,64 @@ New configuration parameters that are introduced with v.2 are marked bold.
 ### Enabling Timeline Service v.2
 
  Preparing Apache HBase cluster for storage
+There are a few steps to be done for preparing the storage for Timeline Service v.2:
+
+Step 1) [Set up the HBase cluster](#Set_up_the_HBase_cluster)
+
+Step 2) [Enable the coprocessor](#Enable_the_coprocessor)
+
+Step 3) [Create the schema for Timeline Service v.2](#Create_schema)
+
+Each step is explained in more detail below.
+
+#  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.1.x. The 1.0.x versions
-do not work with Timeline Service v.2. The 1.2.x versions have not been tested.
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.4. The 1.0.x versions
+do not work with Timeline Service v.2. Later versions of HBase have not been tested with
+Timeline Service.
+
+HBase has different deployment modes. Refer to the HBase book for understanding them and pick a
+mode that is suitable for your setup.
+(http://hbase.apache.org/book.html#standalone_dist)
+
+# Simple deployment for HBase
+If you are intent on a simple deploy profile for the Apache HBase cluster
+where the data loading is light but the data needs to persist across node
+comings and goings, you could consider the "Standalone HBase over HDFS" deploy mode.
+
+This is a useful variation on the standalone HBase setup and has all HBase daemons running inside
+one JVM but rather than persisting to the local filesystem, it persists to an HDFS instance.
+Writing to HDFS where data is replicated ensures that data is persisted across node
+comings and goings. To configure this standalone variant, edit your `hbase-site.xml` setting
+the `hbase.rootdir` to point at a directory in your HDFS instance but then set
+`hbase.cluster.distributed` to false. For example:
+
+```
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://namenode.example.org:8020/hbase</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>false</value>
+  </property>
+</configuration>
+```
+
+For more details on this mode, refer to
+http://hbase.apache.org/book.html#standalone.over.hdfs .
+
+Once you have an Apache HBase cluster ready to use, perform the following steps.
 
-Once you have an Apache HBase cluster ready to use for this purpose, perform the following steps.
+#  Step 2) Enable the coprocessor
 
-First, add the timeline service jar to the HBase classpath in all HBase machines in the cluster. It
+Step 2.1) Add the timeline service jar to the HBase classpath in all HBase machines in the cluster. It
 is needed for the coprocessor as well as the schema creator. For example,
 
    cp hadoop-yarn-server-timelineservice-3.0.0-alpha1-SNAPSHOT.jar /usr/hbase/lib/
 
-Then, enable the coprocessor that handles the aggregation. To enable it, add the following entry in
+Step 2.2) Enable the coprocessor that handles the aggregation. To enable it, add the following entry in
 region servers' `hbase-site.xml` file (generally located in the `conf` directory) as follows:
 
 ```
@@ -186,10 +232,11 @@ region servers' `hbase-site.xml` file (generally located in the `conf` directory
 
 ```
 
-Restart the region servers and the master to pick up the 
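The standalone-over-HDFS variant documented above reduces to the two hbase-site.xml properties shown in the new fenced example; as a purely illustrative sketch (not part of the doc change), the same pair expressed through the Hadoop Configuration API, with the hostname reused from the doc's example:

```java
import org.apache.hadoop.conf.Configuration;

public class StandaloneOverHdfsSketch {
  public static void main(String[] args) {
    // Equivalent to the hbase-site.xml snippet in the doc change above:
    Configuration hbaseConf = new Configuration(false);
    hbaseConf.set("hbase.rootdir", "hdfs://namenode.example.org:8020/hbase");
    // All HBase daemons in one JVM, persisting to HDFS instead of local disk.
    hbaseConf.setBoolean("hbase.cluster.distributed", false);
    System.out.println(hbaseConf.get("hbase.rootdir"));
  }
}
```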

hadoop git commit: YARN-6081. LeafQueue#getTotalPendingResourcesConsideringUserLimit should deduct reserved from pending to avoid unnecessary preemption of reserved container. Contributed by Wangda Tan

2017-01-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 963ef1e31 -> f166bb8f0


YARN-6081. LeafQueue#getTotalPendingResourcesConsideringUserLimit should deduct reserved from pending to avoid unnecessary preemption of reserved container. Contributed by Wangda Tan.

(cherry picked from commit d3170f9eba9bc5c38b5fa50c24e37ca2bd5636c2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f166bb8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f166bb8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f166bb8f

Branch: refs/heads/branch-2
Commit: f166bb8f097442e4b831e34c500339ddc4bf3e79
Parents: 963ef1e
Author: Sunil G 
Authored: Fri Jan 13 18:22:29 2017 +0530
Committer: Sunil G 
Committed: Fri Jan 13 18:30:31 2017 +0530

--
 .../monitor/capacity/TempQueuePerPartition.java | 21 ++---
 .../scheduler/SchedulerApplicationAttempt.java  | 10 ++-
 .../scheduler/capacity/LeafQueue.java   | 48 ---
 ...alCapacityPreemptionPolicyMockFramework.java | 16 ++--
 ...estProportionalCapacityPreemptionPolicy.java | 60 --
 .../capacity/TestContainerAllocation.java   | 85 
 .../scheduler/capacity/TestLeafQueue.java   | 31 +++
 7 files changed, 202 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f166bb8f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index 28099c4..9783457 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -51,6 +51,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
   LeafQueue leafQueue;
   boolean preemptionDisabled;
 
+  protected Resource pendingDeductReserved;
+
   TempQueuePerPartition(String queueName, Resource current,
   boolean preemptionDisabled, String partition, Resource killable,
   float absCapacity, float absMaxCapacity, Resource totalPartitionResource,
@@ -61,10 +63,13 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
 if (queue instanceof LeafQueue) {
   LeafQueue l = (LeafQueue) queue;
   pending = l.getTotalPendingResourcesConsideringUserLimit(
-  totalPartitionResource, partition);
+  totalPartitionResource, partition, false);
+  pendingDeductReserved = l.getTotalPendingResourcesConsideringUserLimit(
+  totalPartitionResource, partition, true);
   leafQueue = l;
 } else {
   pending = Resources.createResource(0);
+  pendingDeductReserved = Resources.createResource(0);
 }
 
 this.normalizedGuarantee = Float.NaN;
@@ -95,16 +100,13 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
 assert leafQueue == null;
 children.add(q);
 Resources.addTo(pending, q.pending);
+Resources.addTo(pendingDeductReserved, q.pendingDeductReserved);
   }
 
   public ArrayList<TempQueuePerPartition> getChildren() {
 return children;
   }
 
-  public Resource getUsedDeductReservd() {
-return Resources.subtract(current, reserved);
-  }
-
   // This function "accepts" all the resources it can (pending) and return
   // the unused ones
   Resource offer(Resource avail, ResourceCalculator rc,
@@ -121,7 +123,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
  * When we're using FifoPreemptionSelector (considerReservedResource
  * = false).
  *
- * We should deduct reserved resource to avoid excessive preemption:
+ * We should deduct reserved resource from pending to avoid excessive
+ * preemption:
  *
  * For example, if an under-utilized queue has used = reserved = 20.
  * Preemption policy will try to preempt 20 containers (which is not
@@ -131,10 +134,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
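To make the javadoc example above concrete, a hedged sketch (not from the patch) of the deduction using the YARN Resources helper:

```java
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class DeductReservedSketch {
  public static void main(String[] args) {
    // The javadoc scenario: an under-utilized queue with used = reserved = 20,
    // whose pending demand of 20 is already covered by its reservations.
    Resource pending = Resources.createResource(20);
    Resource reserved = Resources.createResource(20);
    // Truly unsatisfied demand is pending minus reserved, so no preemption:
    Resource pendingDeductReserved = Resources.subtract(pending, reserved);
    System.out.println(pendingDeductReserved); // memory and vcores both 0
  }
}
```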

hadoop git commit: YARN-6081. LeafQueue#getTotalPendingResourcesConsideringUserLimit should deduct reserved from pending to avoid unnecessary preemption of reserved container. Contributed by Wangda Tan

2017-01-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f344e057 -> d3170f9eb


YARN-6081. LeafQueue#getTotalPendingResourcesConsideringUserLimit should deduct reserved from pending to avoid unnecessary preemption of reserved container. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3170f9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3170f9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3170f9e

Branch: refs/heads/trunk
Commit: d3170f9eba9bc5c38b5fa50c24e37ca2bd5636c2
Parents: 1f344e0
Author: Sunil G 
Authored: Fri Jan 13 18:22:29 2017 +0530
Committer: Sunil G 
Committed: Fri Jan 13 18:22:29 2017 +0530

--
 .../monitor/capacity/TempQueuePerPartition.java | 21 ++---
 .../scheduler/SchedulerApplicationAttempt.java  | 10 ++-
 .../scheduler/capacity/LeafQueue.java   | 48 ---
 ...alCapacityPreemptionPolicyMockFramework.java | 16 ++--
 ...estProportionalCapacityPreemptionPolicy.java | 60 --
 .../capacity/TestContainerAllocation.java   | 85 
 .../scheduler/capacity/TestLeafQueue.java   | 31 +++
 7 files changed, 202 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3170f9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index 28099c4..9783457 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -51,6 +51,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
   LeafQueue leafQueue;
   boolean preemptionDisabled;
 
+  protected Resource pendingDeductReserved;
+
   TempQueuePerPartition(String queueName, Resource current,
   boolean preemptionDisabled, String partition, Resource killable,
   float absCapacity, float absMaxCapacity, Resource totalPartitionResource,
@@ -61,10 +63,13 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
 if (queue instanceof LeafQueue) {
   LeafQueue l = (LeafQueue) queue;
   pending = l.getTotalPendingResourcesConsideringUserLimit(
-  totalPartitionResource, partition);
+  totalPartitionResource, partition, false);
+  pendingDeductReserved = l.getTotalPendingResourcesConsideringUserLimit(
+  totalPartitionResource, partition, true);
   leafQueue = l;
 } else {
   pending = Resources.createResource(0);
+  pendingDeductReserved = Resources.createResource(0);
 }
 
 this.normalizedGuarantee = Float.NaN;
@@ -95,16 +100,13 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
 assert leafQueue == null;
 children.add(q);
 Resources.addTo(pending, q.pending);
+Resources.addTo(pendingDeductReserved, q.pendingDeductReserved);
   }
 
   public ArrayList<TempQueuePerPartition> getChildren() {
 return children;
   }
 
-  public Resource getUsedDeductReservd() {
-return Resources.subtract(current, reserved);
-  }
-
   // This function "accepts" all the resources it can (pending) and return
   // the unused ones
   Resource offer(Resource avail, ResourceCalculator rc,
@@ -121,7 +123,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
  * When we're using FifoPreemptionSelector (considerReservedResource
  * = false).
  *
- * We should deduct reserved resource to avoid excessive preemption:
+ * We should deduct reserved resource from pending to avoid excessive
+ * preemption:
  *
  * For example, if an under-utilized queue has used = reserved = 20.
  * Preemption policy will try to preempt 20 containers (which is not
@@ -131,10 +134,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
  * resource can be used by pending request, so policy will preempt
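To complement the javadoc above, a hedged sketch (not from the patch) of how a parent queue would aggregate the two pending figures, mirroring the addChild() hunk earlier in this diff:

```java
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class AggregatePendingSketch {
  public static void main(String[] args) {
    // A parent accumulates both figures from each child, as addChild() does.
    Resource parentPending = Resources.createResource(0);
    Resource parentPendingDeductReserved = Resources.createResource(0);

    // Hypothetical child: 20 pending, all of it covered by reservations.
    Resource childPending = Resources.createResource(20);
    Resource childPendingDeductReserved = Resources.createResource(0);

    Resources.addTo(parentPending, childPending);
    Resources.addTo(parentPendingDeductReserved, childPendingDeductReserved);
    System.out.println(parentPending + " vs " + parentPendingDeductReserved);
  }
}
```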