[hadoop] branch trunk updated: HDFS-14487. Missing Space in Client Error Message (Contributed by Shweta Yakkali via Daniel Templeton)

2019-06-18 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c1a1ce  HDFS-14487. Missing Space in Client Error Message 
(Contributed by Shweta Yakkali via Daniel Templeton)
3c1a1ce is described below

commit 3c1a1ceea9e35ac53376276139416b728ed57f10
Author: Shweta Yakkali 
AuthorDate: Tue Jun 18 10:20:40 2019 -0700

HDFS-14487. Missing Space in Client Error Message (Contributed by Shweta 
Yakkali via Daniel Templeton)

Change-Id: I0f8ce74a35ab24fe94fd0e57d8247bb3fa575e6f
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index a4e0742..4a0d75e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -965,7 +965,7 @@ public class DFSOutputStream extends FSOutputSummer
 }
 try {
   if (retries == 0) {
-throw new IOException("Unable to close file because the last block"
+throw new IOException("Unable to close file because the last block "
 + last + " does not have enough number of replicas.");
   }
   retries--;


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14359. Inherited ACL permissions masked when parent directory does not exist (mkdir -p) (Contributed by Stephen O'Donnell via Daniel Templeton)

2019-03-25 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3f6d6d2  HDFS-14359. Inherited ACL permissions masked when parent 
directory does not exist (mkdir -p) (Contributed by Stephen O'Donnell via 
Daniel Templeton)
3f6d6d2 is described below

commit 3f6d6d28119049b003cb81735ce675e52d0d2104
Author: Stephen O'Donnell 
AuthorDate: Mon Mar 25 16:16:13 2019 -0700

HDFS-14359. Inherited ACL permissions masked when parent directory does not 
exist (mkdir -p)
(Contributed by Stephen O'Donnell via Daniel Templeton)

Change-Id: Ia83f799a8f56aa8057a967b234f184683395fa41
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 18 +++--
 .../hadoop/hdfs/server/namenode/FSAclBaseTest.java |  2 +-
 .../testAclCLIWithPosixAclInheritance.xml  | 77 ++
 3 files changed, 92 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 2f0a0fc..95e8898 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.permission.FsCreateModes;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
@@ -187,10 +188,19 @@ class FSDirMkdirOp {
   private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm,
   PermissionStatus perm) {
 FsPermission p = parentPerm.getPermission();
-FsPermission ancestorPerm = new FsPermission(
-p.getUserAction().or(FsAction.WRITE_EXECUTE),
-p.getGroupAction(),
-p.getOtherAction());
+FsPermission ancestorPerm;
+if (p.getUnmasked() == null) {
+  ancestorPerm = new FsPermission(
+  p.getUserAction().or(FsAction.WRITE_EXECUTE),
+  p.getGroupAction(),
+  p.getOtherAction());
+} else {
+  ancestorPerm = FsCreateModes.create(
+  new FsPermission(
+p.getUserAction().or(FsAction.WRITE_EXECUTE),
+p.getGroupAction(),
+p.getOtherAction()), p.getUnmasked());
+}
 return new PermissionStatus(perm.getUserName(), perm.getGroupName(),
 ancestorPerm);
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index ee92217..fd50648 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -1150,7 +1150,7 @@ public abstract class FSAclBaseTest {
 AclStatus s = fs.getAclStatus(dirPath);
 AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
 assertArrayEquals(expected, returned);
-assertPermission(dirPath, (short)010750);
+assertPermission(dirPath, (short)010770);
 assertAclFeature(dirPath, true);
 s = fs.getAclStatus(subdirPath);
 returned = s.getEntries().toArray(new AclEntry[0]);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
index 7e9ace1..9ff71b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
@@ -741,6 +741,83 @@
   
 
 
+  
+  setfacl : check inherit default ACL to ancestor dir with 
mkdir -p
+  
+-fs NAMENODE -mkdir /dir1
+-fs NAMENODE -setfacl -m 
default:user:charlie:r-x,default:group:admin:rwx /dir1
+-fs NAMENODE -mkdir -p /dir1/dir2/dir3
+-fs NAMENODE -getfacl /dir1/dir2
+  
+  
+-fs NAMENODE -rm -R /dir1
+  
+  
+
+  SubstringComparator
+  # file: /dir1/dir2
+
+
+  SubstringComparator
+  # owner: USERNAME
+
+
+  SubstringComparator
+  # group: supergroup
+
+
+  SubstringComparator
+  user::rwx
+
+
+  
+  RegexpComparator
+  ^user:charlie:r-x$
+
+
+  SubstringComparator
+  group::r-x

[hadoop] branch trunk updated: YARN-9358. Add javadoc to new methods introduced in FSQueueMetrics with YARN-9322 (Contributed by Zoltan Siegl via Daniel Templeton)

2019-03-22 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ce5eb9c  YARN-9358. Add javadoc to new methods introduced in 
FSQueueMetrics with YARN-9322 (Contributed by Zoltan Siegl via Daniel Templeton)
ce5eb9c is described below

commit ce5eb9cb2e04baf2e94fdc7dcdb57d0404cf6e76
Author: Zoltan Siegl 
AuthorDate: Fri Mar 22 11:23:50 2019 +0100

YARN-9358. Add javadoc to new methods introduced in FSQueueMetrics with 
YARN-9322
(Contributed by Zoltan Siegl via Daniel Templeton)

Change-Id: I92d52c0ca630e71afb26b2b7587cbdbe79254a05
---
 .../scheduler/fair/FSQueueMetrics.java | 69 +-
 1 file changed, 67 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
index d0ddd42..5fa84f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
@@ -52,6 +52,15 @@ public class FSQueueMetrics extends QueueMetrics {
   private final FSQueueMetricsForCustomResources customResources;
   private String schedulingPolicy;
 
+  /**
+   * Constructor for {@link FairScheduler} queue metrics data object.
+   *
+   * @param ms the MetricSystem to register with
+   * @param queueName the queue name
+   * @param parent the parent {@link Queue}
+   * @param enableUserMetrics store metrics on user level
+   * @param conf the {@link Configuration} object to build buckets upon
+   */
   FSQueueMetrics(MetricsSystem ms, String queueName, Queue parent,
   boolean enableUserMetrics, Configuration conf) {
 super(ms, queueName, parent, enableUserMetrics, conf);
@@ -72,6 +81,11 @@ public class FSQueueMetrics extends QueueMetrics {
 return fairShareVCores.value();
   }
 
+  /**
+   * Get instantaneous fair share of the queue.
+   *
+   * @return the returned {@link Resource} also contains custom resource types
+   */
   public Resource getFairShare() {
 if (customResources != null) {
   return Resource.newInstance(fairShareMB.value(),
@@ -82,6 +96,12 @@ public class FSQueueMetrics extends QueueMetrics {
 (int) fairShareVCores.value());
   }
 
+  /**
+   * Set instantaneous fair share of the queue.
+   *
+   * @param resource the passed {@link Resource} object may also contain custom
+   * resource types
+   */
   public void setFairShare(Resource resource) {
 fairShareMB.set(resource.getMemorySize());
 fairShareVCores.set(resource.getVirtualCores());
@@ -98,6 +118,11 @@ public class FSQueueMetrics extends QueueMetrics {
 return steadyFairShareVCores.value();
   }
 
+  /**
+   * Get steady fair share for queue.
+   *
+   * @return the returned {@link Resource} also contains custom resource types
+   */
   public Resource getSteadyFairShare() {
 if (customResources != null) {
   return Resource.newInstance(steadyFairShareMB.value(),
@@ -108,6 +133,12 @@ public class FSQueueMetrics extends QueueMetrics {
 (int) steadyFairShareVCores.value());
   }
 
+  /**
+   * Set steady fair share for queue.
+   *
+   * @param resource the passed {@link Resource} object may also contain custom
+   * resource types
+   */
   public void setSteadyFairShare(Resource resource) {
 steadyFairShareMB.set(resource.getMemorySize());
 steadyFairShareVCores.set(resource.getVirtualCores());
@@ -124,6 +155,11 @@ public class FSQueueMetrics extends QueueMetrics {
 return minShareVCores.value();
   }
 
+  /**
+   * Get minimum required resource share for queue.
+   *
+   * @return the returned {@link Resource} also contains custom resource types
+   */
   public Resource getMinShare() {
 if (customResources != null) {
   return Resource.newInstance(minShareMB.value(),
@@ -134,6 +170,12 @@ public class FSQueueMetrics extends QueueMetrics {
 (int) minShareVCores.value());
   }
 
+  /**
+   * Set minimum required resource share for queue.
+   *
+   * @param resource the passed {@link Resource} object may also contain custom
+   * resource types
+   */
   public void setMinShare(Resource resource) {
 minShareMB.set(resource.getMemorySize());
 minShareVCores.set(resource.getVirtualCores());
@@ -150,6 +192,11 @@ public class FSQueueMetrics

[hadoop] branch trunk updated: HDFS-14328. [Clean-up] Remove NULL check before instanceof in TestGSet (Contributed by Shweta Yakkali via Daniel Templeton)

2019-03-18 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2db38ab  HDFS-14328. [Clean-up] Remove NULL check before instanceof in 
TestGSet (Contributed by Shweta Yakkali via Daniel Templeton)
2db38ab is described below

commit 2db38abffcd89bf1fa0cad953254daea7e4e415b
Author: Shweta Yakkali 
AuthorDate: Mon Mar 18 15:10:26 2019 +0100

HDFS-14328. [Clean-up] Remove NULL check before instanceof in TestGSet
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: I5b9f0e4714d7c5bbfa30492a09f770626711
---
 .../hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java
index 2d39f3d..14dde6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java
@@ -23,7 +23,6 @@ import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -471,7 +470,7 @@ public class TestGSet {
 
 @Override
 public boolean equals(Object obj) {
-  return obj != null && obj instanceof IntElement
+  return obj instanceof IntElement
   && value == ((IntElement)obj).value;
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9340. [Clean-up] Remove NULL check before instanceof in ResourceRequestSetKey (Contributed by Shweta Yakkali via Daniel Templeton)

2019-03-18 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0e7e901  YARN-9340. [Clean-up] Remove NULL check before instanceof in 
ResourceRequestSetKey (Contributed by Shweta Yakkali via Daniel Templeton)
0e7e901 is described below

commit 0e7e9013d4a0785ae22a5a569c3977aeaf7e1900
Author: Shweta Yakkali 
AuthorDate: Mon Mar 18 05:52:48 2019 -0700

YARN-9340. [Clean-up] Remove NULL check before instanceof in 
ResourceRequestSetKey
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: I932e29b36f086f7b7c76a250e33b473617ddbda1
---
 .../org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
index 4db88ef..3816261 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
@@ -88,7 +88,7 @@ public class ResourceRequestSetKey extends 
SchedulerRequestKey {
 
   @Override
   public boolean equals(Object obj) {
-if (obj == null || !(obj instanceof SchedulerRequestKey)) {
+if (!(obj instanceof SchedulerRequestKey)) {
   return false;
 }
 if (!(obj instanceof ResourceRequestSetKey)) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: MAPREDUCE-7188. [Clean-up] Remove NULL check before instanceof and fix checkstyle issue in TaskResult (Contributed by Shweta Yakkali via Daniel Templeton)

2019-03-18 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cb4d911  MAPREDUCE-7188. [Clean-up] Remove NULL check before 
instanceof and fix checkstyle issue in TaskResult (Contributed by Shweta 
Yakkali via Daniel Templeton)
cb4d911 is described below

commit cb4d911a82c0ca7a31092da868471ad711c9afcf
Author: Shweta Yakkali 
AuthorDate: Sat Mar 16 12:34:16 2019 -0700

MAPREDUCE-7188. [Clean-up] Remove NULL check before instanceof and fix 
checkstyle issue in TaskResult
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: Ie5eb9462f94e45cfd9d2a4b85c081ac8be949c07
---
 .../org/apache/hadoop/examples/pi/TaskResult.java  | 34 ++
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
index d60d4ce..cd8eba3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
@@ -25,7 +25,8 @@ import org.apache.hadoop.examples.pi.math.Summation;
 import org.apache.hadoop.io.Writable;
 
 /** A class for map task results or reduce task results. */
-public class TaskResult implements Container, 
Combinable, Writable {
+public class TaskResult implements Container,
+Combinable, Writable {
   private Summation sigma;
   private long duration;
 
@@ -38,10 +39,14 @@ public class TaskResult implements Container, 
Combinable,
 
   /** {@inheritDoc} */
   @Override
-  public Summation getElement() {return sigma;}
+  public Summation getElement() {
+return sigma;
+  }
 
   /** @return The time duration used */
-  long getDuration() {return duration;}
+  long getDuration() {
+return duration;
+  }
 
   /** {@inheritDoc} */
   @Override
@@ -54,7 +59,7 @@ public class TaskResult implements Container, 
Combinable,
   public boolean equals(Object obj) {
 if (this == obj)
   return true;
-else if (obj != null && obj instanceof TaskResult) {
+else if (obj instanceof TaskResult) {
   final TaskResult that = (TaskResult)obj;
   return this.compareTo(that) == 0;
 }
@@ -62,7 +67,7 @@ public class TaskResult implements Container, 
Combinable,
   "obj.getClass()=" + obj.getClass());
   }
 
-  /** Not supported */
+  /** Not supported. */
   @Override
   public int hashCode() {
 throw new UnsupportedOperationException();
@@ -72,7 +77,7 @@ public class TaskResult implements Container, 
Combinable,
   @Override
   public TaskResult combine(TaskResult that) {
 final Summation s = sigma.combine(that.sigma);
-return s == null? null: new TaskResult(s, this.duration + that.duration);
+return s == null ? null : new TaskResult(s, this.duration + that.duration);
   }
 
   /** {@inheritDoc} */
@@ -92,22 +97,27 @@ public class TaskResult implements Container, 
Combinable,
   /** {@inheritDoc} */
   @Override
   public String toString() {
-return "sigma=" + sigma + ", duration=" + duration + "(" + 
Util.millis2String(duration) + ")";
+return "sigma=" + sigma + ", duration=" + duration + "(" +
+Util.millis2String(duration) + ")";
   }
 
-  /** Covert a String to a TaskResult */
+  /** Covert a String to a TaskResult. */
   public static TaskResult valueOf(String s) {
 int i = 0;
 int j = s.indexOf(", duration=");
 if (j < 0)
-  throw new IllegalArgumentException("i=" + i + ", j=" + j + " < 0, s=" + 
s);
-final Summation sigma = 
Summation.valueOf(Util.parseStringVariable("sigma", s.substring(i, j)));
+  throw new IllegalArgumentException("i=" + i + ", j=" + j +
+  " < 0, s=" + s);
+final Summation sigma =
+Summation.valueOf(Util.parseStringVariable("sigma", s.substring(i, 
j)));
 
 i = j + 2;
 j = s.indexOf("(", i);
 if (j < 0)
-  throw new IllegalArgumentException("i=" + i + ", j=" + j + " < 0, s=" + 
s);
-final long duration = Util.parseLongVariable("duration", s.substring(i, 
j));
+  throw new IllegalArgumentException("i=" + i + ", j=" + j +
+  " < 0, s=" + s);
+final long duration =
+Util.parseLongVariable("duration", s.substring(i, j));
 
 return new TaskResult(sigma, duration);
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16157. [Clean-up] Remove NULL check before instanceof in AzureNativeFileSystemStore (Contributed by Shweta Yakkali via Daniel Templeton)

2019-03-09 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bb8ad09  HADOOP-16157. [Clean-up] Remove NULL check before instanceof 
in AzureNativeFileSystemStore (Contributed by Shweta Yakkali via Daniel 
Templeton)
bb8ad09 is described below

commit bb8ad096e785f7127a5c0de15167255d9b119578
Author: Shweta Yakkali 
AuthorDate: Sat Mar 9 09:55:05 2019 -0800

HADOOP-16157. [Clean-up] Remove NULL check before instanceof in 
AzureNativeFileSystemStore
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: I6269ae66378e46eed440a76f847ae1af1fa95450
---
 .../java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index a87bbc5..239dec2 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -1762,7 +1762,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   throw new AzureException(e);
 } catch (IOException e) {
   Throwable t = e.getCause();
-  if (t != null && t instanceof StorageException) {
+  if (t instanceof StorageException) {
 StorageException se = (StorageException) t;
 // If we got this exception, the blob should have already been created
 if (!"LeaseIdMissing".equals(se.getErrorCode())) {
@@ -2638,7 +2638,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   return delete(key, null);
 } catch (IOException e) {
   Throwable t = e.getCause();
-  if (t != null && t instanceof StorageException) {
+  if (t instanceof StorageException) {
 StorageException se = (StorageException) t;
 if ("LeaseIdMissing".equals(se.getErrorCode())){
   SelfRenewingLease lease = null;


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9323. FSLeafQueue#computeMaxAMResource does not override zero values for custom resources (Contributed by Szilard Nemeth via Daniel Templeton)

2019-02-27 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 538bb48  YARN-9323. FSLeafQueue#computeMaxAMResource does not override 
zero values for custom resources (Contributed by Szilard Nemeth via Daniel 
Templeton)
538bb48 is described below

commit 538bb4880da43714af68143e9a1dde195bd77099
Author: Szilard Nemeth 
AuthorDate: Wed Feb 27 19:49:34 2019 -0800

YARN-9323. FSLeafQueue#computeMaxAMResource does not override zero values 
for custom resources
(Contributed by Szilard Nemeth via Daniel Templeton)

Change-Id: Id844ccf09488f367c0c7de0a3b2d4aca1bba31cc
---
 .../resourcemanager/scheduler/QueueMetrics.java|   4 +
 .../scheduler/QueueMetricsForCustomResources.java  |   4 +
 .../scheduler/fair/FSLeafQueue.java|  26 
 .../scheduler/fair/FSQueueMetrics.java |   4 +
 .../scheduler/fair/TestFSLeafQueue.java| 134 +
 5 files changed, 172 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 0a01c60..d126f09 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -832,4 +832,8 @@ public class QueueMetrics implements MetricsSource {
   public long getAggregatePreemptedContainers() {
 return aggregateContainersPreempted.value();
   }
+
+  public QueueMetricsForCustomResources getQueueMetricsForCustomResources() {
+return queueMetricsForCustomResources;
+  }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java
index e8c8897..3470858 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java
@@ -101,4 +101,8 @@ public class QueueMetricsForCustomResources {
   QueueMetricsCustomResource getAggregatePreemptedSeconds() {
 return aggregatePreemptedSeconds;
   }
+
+  public QueueMetricsCustomResource getAvailable() {
+return available;
+  }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 3deddee..044254d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -42,6 +43,8 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsCustomResource;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsForCustomResources;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt

[hadoop] branch trunk updated: YARN-9322. Store metrics for custom resource types into FSQueueMetrics and query them in FairSchedulerQueueInfo (Contributed by Szilard Nemeth via Daniel Templeton)

2019-02-27 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7b928f1  YARN-9322. Store metrics for custom resource types into 
FSQueueMetrics and query them in FairSchedulerQueueInfo (Contributed by Szilard 
Nemeth via Daniel Templeton)
7b928f1 is described below

commit 7b928f19a4521d46ed7c48f8ce5c936acfd6b794
Author: Szilard Nemeth 
AuthorDate: Wed Feb 27 14:23:34 2019 -0800

YARN-9322. Store metrics for custom resource types into FSQueueMetrics and 
query them in FairSchedulerQueueInfo
(Contributed by Szilard Nemeth via Daniel Templeton)

Change-Id: I14c12f1265999d62102f2ec5506d90015efeefe8
---
 .../FSQueueMetricsForCustomResources.java  | 113 ++
 .../resourcemanager/scheduler/QueueMetrics.java|   2 -
 .../scheduler/QueueMetricsCustomResource.java  |  76 +++
 .../scheduler/QueueMetricsForCustomResources.java  |  54 -
 .../scheduler/fair/FSQueueMetrics.java | 124 +--
 .../webapp/dao/FairSchedulerQueueInfo.java |  10 +-
 .../resourcemanager/webapp/dao/ResourceInfo.java   |   1 -
 .../TestQueueMetricsForCustomResources.java|   2 -
 .../scheduler/fair/TestFSQueueMetrics.java | 247 -
 9 files changed, 543 insertions(+), 86 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java
new file mode 100644
index 000..2f73d6b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+import java.util.Map;
+
+/**
+ * This class is a main entry-point for any kind of metrics for
+ * custom resources.
+ * It provides increase and decrease methods for all types of metrics.
+ */
+public class FSQueueMetricsForCustomResources {
+  private final QueueMetricsCustomResource fairShare =
+  new QueueMetricsCustomResource();
+  private final QueueMetricsCustomResource steadyFairShare =
+  new QueueMetricsCustomResource();
+  private final QueueMetricsCustomResource minShare =
+  new QueueMetricsCustomResource();
+  private final QueueMetricsCustomResource maxShare =
+  new QueueMetricsCustomResource();
+  private final QueueMetricsCustomResource maxAMShare =
+  new QueueMetricsCustomResource();
+  private final QueueMetricsCustomResource amResourceUsage =
+  new QueueMetricsCustomResource();
+
+  public QueueMetricsCustomResource getFairShare() {
+return fairShare;
+  }
+
+  public void setFairShare(Resource res) {
+fairShare.set(res);
+  }
+
+  public Map getFairShareValues() {
+return fairShare.getValues();
+  }
+
+  public QueueMetricsCustomResource getSteadyFairShare() {
+return steadyFairShare;
+  }
+
+  public void setSteadyFairShare(Resource res) {
+steadyFairShare.set(res);
+  }
+
+  public Map getSteadyFairShareValues() {
+return steadyFairShare.getValues();
+  }
+
+  public QueueMetricsCustomResource getMinShare() {
+return minShare;
+  }
+
+  public void setMinShare(Resource res) {
+minShare.set(res);
+  }
+
+  public Map getMinShareValues() {
+return minShare.getValues();
+  }
+
+  public QueueMetricsCustomResource getMaxShare() {
+return maxShare;
+  }
+
+  public void setMaxShare(Resource res) {
+maxShare.set(res);
+  }
+
+  public Map getMaxShareValues() {
+return maxShare.getValues();
+  }
+
+  public QueueMetricsCustomResource getMaxAMShare() {
+return maxAMShare;
+

[hadoop] branch trunk updated: YARN-9318. Resources#multiplyAndRoundUp does not consider Resource Types (Contributed by Szilard Nemeth via Daniel Templeton)

2019-02-27 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 29e27fa  YARN-9318. Resources#multiplyAndRoundUp does not consider 
Resource Types (Contributed by Szilard Nemeth via Daniel Templeton)
29e27fa is described below

commit 29e27faf96b6e5b0ff3bca98f43a662d9d689f0a
Author: Szilard Nemeth 
AuthorDate: Wed Feb 27 10:04:55 2019 -0800

YARN-9318. Resources#multiplyAndRoundUp does not consider Resource Types
(Contributed by Szilard Nemeth via Daniel Templeton)

Change-Id: Ia45f528574c2b054f6f764d1d140e592bdb7e217
---
 .../hadoop/yarn/util/resource/Resources.java   | 68 
 .../hadoop/yarn/util/resource/TestResources.java   | 72 ++
 2 files changed, 89 insertions(+), 51 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 4764147..40d8d38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -35,6 +35,8 @@ import 
org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 @Unstable
 public class Resources {
 
+  private enum RoundingDirection { UP, DOWN }
+
   private static final Log LOG =
   LogFactory.getLog(Resources.class);
 
@@ -305,17 +307,7 @@ public class Resources {
   }
 
   public static Resource multiplyTo(Resource lhs, double by) {
-int maxLength = ResourceUtils.getNumberOfCountableResourceTypes();
-for (int i = 0; i < maxLength; i++) {
-  try {
-ResourceInformation lhsValue = lhs.getResourceInformation(i);
-lhs.setResourceValue(i, (long) (lhsValue.getValue() * by));
-  } catch (ResourceNotFoundException ye) {
-LOG.warn("Resource is missing:" + ye.getMessage());
-continue;
-  }
-}
-return lhs;
+return multiplyAndRound(lhs, by, RoundingDirection.DOWN);
   }
 
   public static Resource multiply(Resource lhs, double by) {
@@ -338,7 +330,6 @@ public class Resources {
 lhs.setResourceValue(i, lhsValue.getValue() + convertedRhs);
   } catch (ResourceNotFoundException ye) {
 LOG.warn("Resource is missing:" + ye.getMessage());
-continue;
   }
 }
 return lhs;
@@ -358,29 +349,58 @@ public class Resources {
   ResourceCalculator calculator,Resource lhs, double by, Resource factor) {
 return calculator.multiplyAndNormalizeDown(lhs, by, factor);
   }
-  
+
+  /**
+   * Multiply {@code lhs} by {@code by}, and set the result rounded down into a
+   * cloned version of {@code lhs} Resource object.
+   * @param lhs Resource object
+   * @param by Multiply values by this value
+   * @return A cloned version of {@code lhs} with updated values
+   */
   public static Resource multiplyAndRoundDown(Resource lhs, double by) {
-Resource out = clone(lhs);
+return multiplyAndRound(clone(lhs), by, RoundingDirection.DOWN);
+  }
+
+  /**
+   * Multiply {@code lhs} by {@code by}, and set the result rounded up into a
+   * cloned version of {@code lhs} Resource object.
+   * @param lhs Resource object
+   * @param by Multiply values by this value
+   * @return A cloned version of {@code lhs} with updated values
+   */
+  public static Resource multiplyAndRoundUp(Resource lhs, double by) {
+return multiplyAndRound(clone(lhs), by, RoundingDirection.UP);
+  }
+
+  /**
+   * Multiply {@code lhs} by {@code by}, and set the result according to
+   * the rounding direction to {@code lhs}
+   * without creating any new {@link Resource} object.
+   * @param lhs Resource object
+   * @param by Multiply values by this value
+   * @return Returns {@code lhs} itself (without cloning) with updated values
+   */
+  private static Resource multiplyAndRound(Resource lhs, double by,
+  RoundingDirection roundingDirection) {
 int maxLength = ResourceUtils.getNumberOfCountableResourceTypes();
 for (int i = 0; i < maxLength; i++) {
   try {
 ResourceInformation lhsValue = lhs.getResourceInformation(i);
-out.setResourceValue(i, (long) (lhsValue.getValue() * by));
+
+final long value;
+if (roundingDirection == RoundingDirection.DOWN) {
+  value = (long) (lhsValue.getValue() * by);
+} else {
+  value = (long) Math.ceil(lhsValue.getValue() * by);
+}
+lhs.setResourceValue(i, value);
   } catch (ResourceNotFoundException ye) {
 LOG.warn("Resource is missing:" + ye.getMessage());
-continue;
   }
 }
-return ou

[hadoop] branch trunk updated: HDFS-14273. Fix checkstyle issues in BlockLocation's method javadoc (Contributed by Shweta Yakkali via Daniel Templeton)

2019-02-20 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 371a6db  HDFS-14273. Fix checkstyle issues in BlockLocation's method 
javadoc (Contributed by Shweta Yakkali via Daniel Templeton)
371a6db is described below

commit 371a6db59ad8891cf0b5101fadee74d31ea2a895
Author: Shweta Yakkali 
AuthorDate: Wed Feb 20 15:36:14 2019 -0800

HDFS-14273. Fix checkstyle issues in BlockLocation's method javadoc
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: I546aa4a0fe7f83b53735acd9925f366b2f1a00e2
---
 .../java/org/apache/hadoop/fs/BlockLocation.java   | 36 +++---
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 37f0309..c6dde52 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -77,14 +77,14 @@ public class BlockLocation implements Serializable {
   new StorageType[0];
 
   /**
-   * Default Constructor
+   * Default Constructor.
*/
   public BlockLocation() {
 this(EMPTY_STR_ARRAY, EMPTY_STR_ARRAY, 0L, 0L);
   }
 
   /**
-   * Copy constructor
+   * Copy constructor.
*/
   public BlockLocation(BlockLocation that) {
 this.hosts = that.hosts;
@@ -99,7 +99,7 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Constructor with host, name, offset and length
+   * Constructor with host, name, offset and length.
*/
   public BlockLocation(String[] names, String[] hosts, long offset, 
long length) {
@@ -107,7 +107,7 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Constructor with host, name, offset, length and corrupt flag
+   * Constructor with host, name, offset, length and corrupt flag.
*/
   public BlockLocation(String[] names, String[] hosts, long offset, 
long length, boolean corrupt) {
@@ -115,7 +115,7 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Constructor with host, name, network topology, offset and length
+   * Constructor with host, name, network topology, offset and length.
*/
   public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
long offset, long length) {
@@ -124,7 +124,7 @@ public class BlockLocation implements Serializable {
 
   /**
* Constructor with host, name, network topology, offset, length 
-   * and corrupt flag
+   * and corrupt flag.
*/
   public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
long offset, long length, boolean corrupt) {
@@ -176,21 +176,21 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Get the list of hosts (hostname) hosting this block
+   * Get the list of hosts (hostname) hosting this block.
*/
   public String[] getHosts() throws IOException {
 return hosts;
   }
 
   /**
-   * Get the list of hosts (hostname) hosting a cached replica of the block
+   * Get the list of hosts (hostname) hosting a cached replica of the block.
*/
   public String[] getCachedHosts() {
-   return cachedHosts;
+return cachedHosts;
   }
 
   /**
-   * Get the list of names (IP:xferPort) hosting this block
+   * Get the list of names (IP:xferPort) hosting this block.
*/
   public String[] getNames() throws IOException {
 return names;
@@ -219,14 +219,14 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Get the start offset of file associated with this block
+   * Get the start offset of file associated with this block.
*/
   public long getOffset() {
 return offset;
   }
   
   /**
-   * Get the length of the block
+   * Get the length of the block.
*/
   public long getLength() {
 return length;
@@ -247,14 +247,14 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Set the start offset of file associated with this block
+   * Set the start offset of file associated with this block.
*/
   public void setOffset(long offset) {
 this.offset = offset;
   }
 
   /**
-   * Set the length of block
+   * Set the length of block.
*/
   public void setLength(long length) {
 this.length = length;
@@ -268,7 +268,7 @@ public class BlockLocation implements Serializable {
   }
 
   /**
-   * Set the hosts hosting this block
+   * Set the hosts hosting this block.
*/
   public void setHosts(String[] hosts) throws IOException {
 if (hosts == null) {
@@ -279,7 +279,7 @@ public class BlockLocation implements Serializable

[hadoop] branch trunk updated: HDFS-14185. Cleanup method calls to static Assert methods in TestAddStripedBlocks (Contributed by Shweta Yakkali via Daniel Templeton)

2019-01-23 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f3e642d  HDFS-14185. Cleanup method calls to static Assert methods in 
TestAddStripedBlocks (Contributed by Shweta Yakkali via Daniel Templeton)
f3e642d is described below

commit f3e642d92bcc4ca4a6f88ad0ca04eeeda2f2f529
Author: shwetayakk...@cloudera.com 
AuthorDate: Wed Jan 23 15:50:26 2019 -0800

HDFS-14185. Cleanup method calls to static Assert methods in 
TestAddStripedBlocks
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: I0d533f575a405ed3affd05994a4208bde7d9cbe9
---
 .../hdfs/server/namenode/TestAddStripedBlocks.java | 92 +++---
 1 file changed, 46 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 348301c..d17a36f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -53,7 +53,6 @@ import 
org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Rule;
@@ -68,6 +67,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAUL
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 public class TestAddStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
@@ -118,7 +118,7 @@ public class TestAddStripedBlocks {
   final List dnList = new ArrayList<>();
   fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, 
false);
   for (DatanodeDescriptor dn : dnList) {
-Assert.assertEquals(1, dn.getBlocksScheduled());
+assertEquals(1, dn.getBlocksScheduled());
   }
 }
 
@@ -131,7 +131,7 @@ public class TestAddStripedBlocks {
 final List dnList = new ArrayList<>();
 fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, 
false);
 for (DatanodeDescriptor dn : dnList) {
-  Assert.assertEquals(0, dn.getBlocksScheduled());
+  assertEquals(0, dn.getBlocksScheduled());
 }
   }
 
@@ -153,7 +153,7 @@ public class TestAddStripedBlocks {
 DFSTestUtil.writeFile(dfs, testPath, "hello again");
 lb = dfs.getClient().getLocatedBlocks(testPath.toString(), 0);
 final long secondId = lb.get(0).getBlock().getBlockId();
-Assert.assertEquals(firstId + HdfsServerConstants.MAX_BLOCKS_IN_GROUP, 
secondId);
+assertEquals(firstId + HdfsServerConstants.MAX_BLOCKS_IN_GROUP, secondId);
   }
 
   private static void writeAndFlushStripedOutputStream(
@@ -180,7 +180,7 @@ public class TestAddStripedBlocks {
 
   BlockInfo[] blocks = fileNode.getBlocks();
   assertEquals(1, blocks.length);
-  Assert.assertTrue(blocks[0].isStriped());
+  assertTrue(blocks[0].isStriped());
 
   checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), true);
 
@@ -190,7 +190,7 @@ public class TestAddStripedBlocks {
   fileNode = fsdir.getINode4Write(file.toString()).asFile();
   blocks = fileNode.getBlocks();
   assertEquals(1, blocks.length);
-  Assert.assertTrue(blocks[0].isStriped());
+  assertTrue(blocks[0].isStriped());
   checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
 
   // save namespace, restart namenode, and check
@@ -203,7 +203,7 @@ public class TestAddStripedBlocks {
   fileNode = fsdir.getINode4Write(file.toString()).asFile();
   blocks = fileNode.getBlocks();
   assertEquals(1, blocks.length);
-  Assert.assertTrue(blocks[0].isStriped());
+  assertTrue(blocks[0].isStriped());
   checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
 } finally {
   IOUtils.cleanup(null, out);
@@ -213,21 +213,21 @@ public class TestAddStripedBlocks {
   private void checkStripedBlockUC(BlockInfoStriped block,
   boolean checkReplica) {
 assertEquals(0, block.numNodes());
-Assert.assertFalse(block.isComplete());
-Assert.assertEquals(dataBlocks, block.getDataBlockNum());
-Assert.assertEquals(parityBlocks, block.getParityBlockNum());
-Assert.assertEquals(0,
+assertFalse(block.isComplete());
+assertEquals(dataBlocks, block.getDataBlockNum());
+assertEquals(parityBlocks, b

[hadoop] branch trunk updated: HDFS-14132. Add BlockLocation.isStriped() to determine if block is replicated or Striped (Contributed by Shweta Yakkali via Daniel Templeton)

2019-01-08 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4ab5260  HDFS-14132. Add BlockLocation.isStriped() to determine if 
block is replicated or Striped (Contributed by Shweta Yakkali via Daniel 
Templeton)
4ab5260 is described below

commit 4ab5260b7e8d355a042f1533bc020868778b3231
Author: shwetayakk...@cloudera.com 
AuthorDate: Tue Jan 8 17:04:10 2019 -0800

HDFS-14132. Add BlockLocation.isStriped() to determine if block is 
replicated or Striped
(Contributed by Shweta Yakkali via Daniel Templeton)

Change-Id: I0ed8996a0bae2ad2c7d3513143195533f7191af8
---
 .../java/org/apache/hadoop/fs/BlockLocation.java   |  7 +++
 .../org/apache/hadoop/fs/HdfsBlockLocation.java|  4 
 .../hdfs/server/namenode/TestAddStripedBlocks.java | 24 ++
 3 files changed, 35 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 4dae233..ae134c4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -240,6 +240,13 @@ public class BlockLocation implements Serializable {
   }
 
   /**
+   * Return true if the block is striped (erasure coded).
+   */
+  public boolean isStriped() {
+return false;
+  }
+
+  /**
* Set the start offset of file associated with this block
*/
   public void setOffset(long offset) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
index 2ee7f41..47ba64b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
@@ -54,4 +54,8 @@ public class HdfsBlockLocation extends BlockLocation 
implements Serializable {
 block = null;
   }
 
+  @Override
+  public boolean isStriped() {
+return block.isStriped();
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 9374c04..348301c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSStripedOutputStream;
@@ -65,6 +66,8 @@ import java.util.UUID;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class TestAddStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
@@ -476,4 +479,25 @@ public class TestAddStripedBlocks {
 Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
   }
 
+  @Test
+  public void testStripedFlagInBlockLocation() throws IOException {
+Path replicated = new Path("/blockLocation/replicated");
+try (FSDataOutputStream out =
+dfs.createFile(replicated).replicate().recursive().build()) {
+  out.write("this is a replicated file".getBytes());
+}
+BlockLocation[] locations = dfs.getFileBlockLocations(replicated, 0, 100);
+assertEquals("There should be exactly one Block present",
+1, locations.length);
+assertFalse("The file is Striped", locations[0].isStriped());
+
+Path striped = new Path("/blockLocation/striped");
+try (FSDataOutputStream out = dfs.createFile(striped).recursive().build()) 
{
+  out.write("this is a striped file".getBytes());
+}
+locations = dfs.getFileBlockLocations(striped, 0, 100);
+assertEquals("There should be exactly one Block present",
+1, locations.length);
+assertTrue("The file is not Striped", locations[0].isStriped());
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14121. Log message about the old hosts file format is misleading (Contributed by Zsolt Venczel via Daniel Templeton)

2018-12-14 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca379e1c4 -> aa1285989


HDFS-14121. Log message about the old hosts file format is misleading
(Contributed by Zsolt Venczel via Daniel Templeton)

Change-Id: I7ff548f6c82e0aeb08a7a50ca7c2c827db8726bb


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa128598
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa128598
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa128598

Branch: refs/heads/trunk
Commit: aa1285989092bc253c45b7a83acec2e9bce2c5dc
Parents: ca379e1
Author: Zsolt Venczel 
Authored: Fri Dec 14 13:02:45 2018 +0100
Committer: Daniel Templeton 
Committed: Fri Dec 14 13:54:57 2018 +0100

--
 .../hdfs/util/CombinedHostsFileReader.java  | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa128598/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index aa8e4c1..be1f6d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -23,6 +23,7 @@ import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
@@ -61,26 +62,37 @@ public final class CombinedHostsFileReader {
   private CombinedHostsFileReader() {
   }
 
+  private static final String REFER_TO_DOC_MSG = " For the correct JSON" +
+  " format please refer to the documentation (https://hadoop.apache; +
+  ".org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDataNodeAd" +
+  "minGuide.html#JSON-based_configuration)";
+
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from
+   * @param hostsFilePath the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
   public static DatanodeAdminProperties[]
-  readFile(final String hostsFile) throws IOException {
+  readFile(final String hostsFilePath) throws IOException {
 DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
 ObjectMapper objectMapper = new ObjectMapper();
+File hostFile = new File(hostsFilePath);
 boolean tryOldFormat = false;
-try (Reader input =
-new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class);
-} catch (JsonMappingException jme) {
-  // The old format doesn't have json top-level token to enclose the array.
-  // For backward compatibility, try parsing the old format.
-  tryOldFormat = true;
-  LOG.warn("{} has invalid JSON format." +
-  "Try the old format without top-level token defined.", hostsFile);
+
+if (hostFile.length() > 0) {
+  try (Reader input =
+   new InputStreamReader(new FileInputStream(hostFile),
+   "UTF-8")) {
+allDNs = objectMapper.readValue(input, 
DatanodeAdminProperties[].class);
+  } catch (JsonMappingException jme) {
+// The old format doesn't have json top-level token to enclose
+// the array.
+// For backward compatibility, try parsing the old format.
+tryOldFormat = true;
+  }
+} else {
+  LOG.warn(hostsFilePath + " is empty." + REFER_TO_DOC_MSG);
 }
 
 if (tryOldFormat) {
@@ -89,13 +101,18 @@ public final class CombinedHostsFileReader {
   JsonFactory jsonFactory = new JsonFactory();
   List all = new ArrayList<>();
   try (Reader input =
-  new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
+  new InputStreamReader(new FileInputStream(hostsFilePath),
+  "UTF-8")) {
 Iterator iterator =
 objectReader.readValues(jsonFactory.createParser(input));
 while (iterator.hasNext()) {
   DatanodeAdminProperties properties = iterator.next();
   all.add(properties);
 }
+LOG.warn(hostsFilePath + " has legacy JSON format." + 
REFER_TO_DOC_MSG);
+  } catch (Throwable ex) {
+

hadoop git commit: HDFS-13985. Clearer error message for ReplicaNotFoundException (Contributed by Adam Antal via Daniel Templeton)

2018-12-13 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 92d44b2ad -> 4aa0609fb


HDFS-13985. Clearer error message for ReplicaNotFoundException
(Contributed by Adam Antal via Daniel Templeton)

Change-Id: I68ae7a5bedecbc204c161fc3b3ce8878fe5d493d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aa0609f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aa0609f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aa0609f

Branch: refs/heads/trunk
Commit: 4aa0609fb0c38494bae6c7f8aa6732e9a421b27c
Parents: 92d44b2
Author: Daniel Templeton 
Authored: Thu Dec 13 16:20:36 2018 +0100
Committer: Daniel Templeton 
Committed: Thu Dec 13 16:21:45 2018 +0100

--
 .../hdfs/server/datanode/ReplicaNotFoundException.java   | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa0609f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
index 946950c..b2f170b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
@@ -40,13 +40,19 @@ public class ReplicaNotFoundException extends IOException {
   "Replica does not exist ";
   public final static String UNEXPECTED_GS_REPLICA =
   "Cannot append to a replica with unexpected generation stamp ";
+  public final static String POSSIBLE_ROOT_CAUSE_MSG =
+  ". The block may have been removed recently by the balancer " +
+  "or by intentionally reducing the replication factor. " +
+  "This condition is usually harmless. To be certain, please check the " +
+  "preceding datanode log messages for signs of a more serious issue.";
+
 
   public ReplicaNotFoundException() {
 super();
   }
 
   public ReplicaNotFoundException(ExtendedBlock b) {
-super("Replica not found for " + b);
+super("Replica not found for " + b + POSSIBLE_ROOT_CAUSE_MSG);
   }
 
   public ReplicaNotFoundException(String msg) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1846970 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2018-11-19 Thread templedf
Author: templedf
Date: Tue Nov 20 03:29:28 2018
New Revision: 1846970

URL: http://svn.apache.org/viewvc?rev=1846970&view=rev
Log:
Added Daniel Templeton as committer and PMC

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1846970&r1=1846969&r2=1846970&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Tue 
Nov 20 03:29:28 2018
@@ -504,6 +504,14 @@
 
 
 
+  templedf
+  Daniel Templeton
+  Cloudera
+  
+  -8
+
+
+
   tgraves
   Thomas Graves
   Oath
@@ -1549,6 +1557,14 @@

 

+ templedf
+ Daniel Templeton
+ Cloudera
+ 
+ -8
+   
+
+   
  tgraves
  Thomas Graves
  Oath



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14015. Improve error handling in hdfsThreadDestructor in native thread local storage

2018-11-16 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 29374999b -> e56d9f261


HDFS-14015. Improve error handling in hdfsThreadDestructor in native thread 
local storage

Change-Id: Ida1e888c9231b9e46081338e3a206d8f6faabd36


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e56d9f26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e56d9f26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e56d9f26

Branch: refs/heads/trunk
Commit: e56d9f261866bfa456590061d7e3f360f22c5a86
Parents: 2937499
Author: Daniel Templeton 
Authored: Fri Nov 16 16:24:10 2018 -0800
Committer: Daniel Templeton 
Committed: Fri Nov 16 16:24:40 2018 -0800

--
 .../libhdfs/os/posix/thread_local_storage.c | 65 +++-
 .../libhdfs/os/windows/thread_local_storage.c   | 63 ++-
 2 files changed, 123 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56d9f26/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
index e6b59d6..22e2fcf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -23,12 +23,17 @@
 #include 
 #include 
 
+#define UNKNOWN "UNKNOWN"
+#define MAXTHRID 256
+
 /** Key that allows us to retrieve thread-local storage */
 static pthread_key_t gTlsKey;
 
 /** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex 
*/
 static int gTlsKeyInitialized = 0;
 
+static void get_current_thread_id(JNIEnv* env, char* id, int max);
+
 /**
  * The function that is called whenever a thread with libhdfs thread local data
  * is destroyed.
@@ -41,16 +46,26 @@ void hdfsThreadDestructor(void *v)
   struct ThreadLocalState *state = (struct ThreadLocalState*)v;
   JNIEnv *env = state->env;;
   jint ret;
+  char thr_name[MAXTHRID];
 
   /* Detach the current thread from the JVM */
   if (env) {
 ret = (*env)->GetJavaVM(env, &vm);
-if (ret) {
+
+if (ret != 0) {
   fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
 ret);
   (*env)->ExceptionDescribe(env);
 } else {
-  (*vm)->DetachCurrentThread(vm);
+  ret = (*vm)->DetachCurrentThread(vm);
+
+  if (ret != JNI_OK) {
+(*env)->ExceptionDescribe(env);
+get_current_thread_id(env, thr_name, MAXTHRID);
+
+fprintf(stderr, "hdfsThreadDestructor: Unable to detach thread %s "
+"from the JVM. Error code: %d\n", thr_name, ret);
+  }
 }
   }
 
@@ -62,13 +77,57 @@ void hdfsThreadDestructor(void *v)
   free(state);
 }
 
+static void get_current_thread_id(JNIEnv* env, char* id, int max) {
+  jclass cls;
+  jmethodID mth;
+  jobject thr;
+  jstring thr_name;
+  jlong thr_id = 0;
+  const char *thr_name_str;
+
+  cls = (*env)->FindClass(env, "java/lang/Thread");
+  mth = (*env)->GetStaticMethodID(env, cls, "currentThread",
+  "()Ljava/lang/Thread;");
+  thr = (*env)->CallStaticObjectMethod(env, cls, mth);
+
+  if (thr != NULL) {
+mth = (*env)->GetMethodID(env, cls, "getId", "()J");
+thr_id = (*env)->CallLongMethod(env, thr, mth);
+(*env)->ExceptionDescribe(env);
+
+mth = (*env)->GetMethodID(env, cls, "toString", "()Ljava/lang/String;");
+thr_name = (jstring)(*env)->CallObjectMethod(env, thr, mth);
+
+if (thr_name != NULL) {
+  thr_name_str = (*env)->GetStringUTFChars(env, thr_name, NULL);
+
+  // Treating the jlong as a long *should* be safe
+  snprintf(id, max, "%s:%ld", thr_name_str, thr_id);
+
+  // Release the char*
+  (*env)->ReleaseStringUTFChars(env, thr_name, thr_name_str);
+} else {
+  (*env)->ExceptionDescribe(env);
+
+  // Treating the jlong as a long *should* be safe
+  snprintf(id, max, "%s:%ld", UNKNOWN, thr_id);
+}
+  } else {
+(*env)->ExceptionDescribe(env);
+snprintf(id, max, "%s", UNKNOWN);
+  }
+
+  // Make sure the id is null terminated in case we overflow the max length
+  id[max - 1] = '\0';
+}
+
 struct ThreadLocalState* threadLocalStorageCreate()
 {
   struct ThreadLocalState *state;
   state = (struct ThreadLocalState*)malloc(sizeof(struct ThreadLocalState));
   if (state == NULL) {
 fprintf(stderr,
-  "threadLocalStorageSet: OOM - Unable to allocate thread local state\n");
+  "threadLocalStorageCreate: OOM - Unable to allocate thread local 
state\n");
 return NULL;
   }
   

hadoop git commit: HDFS-14047. [libhdfs++] Fix hdfsGetLastExceptionRootCause bug in test_libhdfs_threaded.c (Contributed by Anatoli Shein via Daniel Templeton)

2018-11-06 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab6aa4c72 -> 349168c4b


HDFS-14047. [libhdfs++] Fix hdfsGetLastExceptionRootCause bug in 
test_libhdfs_threaded.c
(Contributed by Anatoli Shein via Daniel Templeton)

Change-Id: Ia5546fd5f8270f285c373b9023a9bd9835059845


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/349168c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/349168c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/349168c4

Branch: refs/heads/trunk
Commit: 349168c4b34275d768e35e52bd3790a809f674a2
Parents: ab6aa4c
Author: Daniel Templeton 
Authored: Mon Nov 5 16:42:29 2018 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 6 09:22:27 2018 -0800

--
 .../hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/349168c4/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
index dbaf1f6..bcbb851 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
@@ -130,7 +130,7 @@ static char* getExceptionUtilString(JNIEnv *env, jthrowable 
exc, char *methodNam
 jstring jStr = NULL;
 char *excString = NULL;
 jthr = invokeMethod(env, , STATIC, NULL,
-"org/apache/commons/lang/exception/ExceptionUtils",
+"org/apache/commons/lang3/exception/ExceptionUtils",
 methodName, "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
 if (jthr) {
 destroyLocalReference(env, jthr);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15823. ABFS: Stop requiring client ID and tenant ID for MSI (Contributed by Da Zhou via Daniel Templeton)

2018-10-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 69b328943 -> e37458447


HADOOP-15823. ABFS: Stop requiring client ID and tenant ID for MSI
(Contributed by Da Zhou via Daniel Templeton)

Change-Id: I546ab3a1df1efec635c08c388148e718dc4a9843


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3745844
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3745844
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3745844

Branch: refs/heads/trunk
Commit: e374584479b687e41d5379bb6d827dcae620e123
Parents: 69b3289
Author: Daniel Templeton 
Authored: Wed Oct 24 14:18:35 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Oct 24 14:18:35 2018 -0700

--
 .../hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java   | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3745844/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
index 97415ce..df7b199 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java
@@ -110,9 +110,6 @@ public final class AzureADAuthenticator {
*/
   public static AzureADToken getTokenFromMsi(String tenantGuid, String 
clientId,
  boolean bypassCache) throws 
IOException {
-Preconditions.checkNotNull(tenantGuid, "tenantGuid");
-Preconditions.checkNotNull(clientId, "clientId");
-
 String authEndpoint = 
"http://169.254.169.254/metadata/identity/oauth2/token;;
 
 QueryParams qp = new QueryParams();
@@ -120,12 +117,12 @@ public final class AzureADAuthenticator {
 qp.add("resource", RESOURCE_NAME);
 
 
-if (tenantGuid.length() > 0) {
+if (tenantGuid != null && tenantGuid.length() > 0) {
   String authority = "https://login.microsoftonline.com/" + tenantGuid;
   qp.add("authority", authority);
 }
 
-if (clientId.length() > 0) {
+if (clientId != null && clientId.length() > 0) {
   qp.add("client_id", clientId);
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13846. Safe blocks counter is not decremented correctly if the block is striped (Contributed by Kitti Nanasi via Daniel Templeton)

2018-09-12 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f6c4545c -> 78bd3b1db


HDFS-13846. Safe blocks counter is not decremented correctly if the block is 
striped
(Contributed by Kitti Nanasi via Daniel Templeton)

Change-Id: Id41747a67dc946fdf0dbde90643bb1ea7e9e0f70


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78bd3b1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78bd3b1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78bd3b1d

Branch: refs/heads/trunk
Commit: 78bd3b1db9dc9eb533c2379ee71f133ecfc5cdeb
Parents: 1f6c454
Author: Daniel Templeton 
Authored: Wed Sep 12 11:18:55 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Sep 12 11:18:55 2018 -0700

--
 .../blockmanagement/BlockManagerSafeMode.java   | 20 +++--
 .../TestBlockManagerSafeMode.java   | 81 +++-
 2 files changed, 76 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78bd3b1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index 8de17ef..5a981e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -418,8 +418,10 @@ class BlockManagerSafeMode {
   }
 
   /**
-   * Increment number of safe blocks if current block has reached minimal
-   * replication.
+   * Increment number of safe blocks if the current block is contiguous
+   * and it has reached minimal replication or
+   * if the current block is striped and the number of its actual data blocks
+   * reaches the number of data units specified by the erasure coding policy.
* If safe mode is not currently on, this is a no-op.
* @param storageNum  current number of replicas or number of internal blocks
*of a striped block group
@@ -433,9 +435,9 @@ class BlockManagerSafeMode {
   return;
 }
 
-final int safe = storedBlock.isStriped() ?
+final int safeNumberOfNodes = storedBlock.isStriped() ?
 ((BlockInfoStriped)storedBlock).getRealDataBlockNum() : 
safeReplication;
-if (storageNum == safe) {
+if (storageNum == safeNumberOfNodes) {
   this.blockSafe++;
 
   // Report startup progress only if we haven't completed startup yet.
@@ -453,8 +455,10 @@ class BlockManagerSafeMode {
   }
 
   /**
-   * Decrement number of safe blocks if current block has fallen below minimal
-   * replication.
+   * Decrement number of safe blocks if the current block is contiguous
+   * and it has just fallen below minimal replication or
+   * if the current block is striped and its actual data blocks has just fallen
+   * below the number of data units specified by erasure coding policy.
* If safe mode is not currently on, this is a no-op.
*/
   synchronized void decrementSafeBlockCount(BlockInfo b) {
@@ -463,9 +467,11 @@ class BlockManagerSafeMode {
   return;
 }
 
+final int safeNumberOfNodes = b.isStriped() ?
+((BlockInfoStriped)b).getRealDataBlockNum() : safeReplication;
 BlockInfo storedBlock = blockManager.getStoredBlock(b);
 if (storedBlock.isComplete() &&
-blockManager.countNodes(b).liveReplicas() == safeReplication - 1) {
+blockManager.countNodes(b).liveReplicas() == safeNumberOfNodes - 1) {
   this.blockSafe--;
   assert blockSafe >= 0;
   checkSafeMode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78bd3b1d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index 866b533..6ad530d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -237,15 +237,7 @@ public class TestBlockManagerSafeMode {
   BlockInfo blockInfo = mock(BlockInfo.class);
   

hadoop git commit: HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD (Contributed by Laszlo Kollar via Daniel Templeton)

2018-08-30 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fa32269ce -> 9a02c4ff6


HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD
(Contributed by Laszlo Kollar via Daniel Templeton)

Change-Id: I6e2459d0700df7f3bad4eac8297a11690191c3ba
(cherry picked from commit f2c2a68ec208f640e778fc41f95f0284fcc44729)
(cherry picked from commit 68301ad7d36d516e265d34dd47c344239e3c6c7d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a02c4ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a02c4ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a02c4ff

Branch: refs/heads/branch-3.0
Commit: 9a02c4ff6566c14b8da76bd698b8fbc32b4551a9
Parents: fa32269
Author: Daniel Templeton 
Authored: Thu Aug 30 09:12:36 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 30 09:15:25 2018 -0700

--
 .../hadoop-common/src/site/markdown/Compatibility.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a02c4ff/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 54be412..1968847 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -187,7 +187,7 @@ existing documentation and tests and/or adding new 
documentation or tests.
 
  Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
 
-Apache Hadoop revisions SHOUD retain binary compatability such that end-user
+Apache Hadoop revisions SHOULD retain binary compatability such that end-user
 applications continue to work without any modifications. Minor Apache Hadoop
 revisions within the same major revision MUST retain compatibility such that
 existing MapReduce applications (e.g. end-user applications and projects such


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD (Contributed by Laszlo Kollar via Daniel Templeton)

2018-08-30 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a0766bf66 -> 68301ad7d


HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD
(Contributed by Laszlo Kollar via Daniel Templeton)

Change-Id: I6e2459d0700df7f3bad4eac8297a11690191c3ba
(cherry picked from commit f2c2a68ec208f640e778fc41f95f0284fcc44729)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68301ad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68301ad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68301ad7

Branch: refs/heads/branch-3.1
Commit: 68301ad7d36d516e265d34dd47c344239e3c6c7d
Parents: a0766bf
Author: Daniel Templeton 
Authored: Thu Aug 30 09:12:36 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 30 09:14:29 2018 -0700

--
 .../hadoop-common/src/site/markdown/Compatibility.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68301ad7/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 6b17c62..03d162a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -187,7 +187,7 @@ existing documentation and tests and/or adding new 
documentation or tests.
 
  Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
 
-Apache Hadoop revisions SHOUD retain binary compatability such that end-user
+Apache Hadoop revisions SHOULD retain binary compatability such that end-user
 applications continue to work without any modifications. Minor Apache Hadoop
 revisions within the same major revision MUST retain compatibility such that
 existing MapReduce applications (e.g. end-user applications and projects such


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD (Contributed by Laszlo Kollar via Daniel Templeton)

2018-08-30 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a0babf76 -> f2c2a68ec


HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD
(Contributed by Laszlo Kollar via Daniel Templeton)

Change-Id: I6e2459d0700df7f3bad4eac8297a11690191c3ba


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2c2a68e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2c2a68e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2c2a68e

Branch: refs/heads/trunk
Commit: f2c2a68ec208f640e778fc41f95f0284fcc44729
Parents: 5a0babf
Author: Daniel Templeton 
Authored: Thu Aug 30 09:12:36 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 30 09:12:36 2018 -0700

--
 .../hadoop-common/src/site/markdown/Compatibility.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2c2a68e/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 6b17c62..03d162a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -187,7 +187,7 @@ existing documentation and tests and/or adding new 
documentation or tests.
 
  Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
 
-Apache Hadoop revisions SHOUD retain binary compatability such that end-user
+Apache Hadoop revisions SHOULD retain binary compatability such that end-user
 applications continue to work without any modifications. Minor Apache Hadoop
 revisions within the same major revision MUST retain compatibility such that
 existing MapReduce applications (e.g. end-user applications and projects such


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-08-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3b0127961 -> fbaa11ef4


HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56
(cherry picked from commit d53a10b0a552155de700e396fd7f450a4c5f9c22)
(cherry picked from commit 90c8cca78389f534429ebf81576ce7ebba9356f7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbaa11ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbaa11ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbaa11ef

Branch: refs/heads/branch-3.0
Commit: fbaa11ef4cde64a410ac0f54693ffa343ad61028
Parents: 3b01279
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 14:01:19 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa11ef/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
  Evolving
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-08-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b8618556e -> 90c8cca78


HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56
(cherry picked from commit d53a10b0a552155de700e396fd7f450a4c5f9c22)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90c8cca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90c8cca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90c8cca7

Branch: refs/heads/branch-3.1
Commit: 90c8cca78389f534429ebf81576ce7ebba9356f7
Parents: b861855
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 14:00:42 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90c8cca7/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
  Evolving
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-08-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 692736f7c -> d53a10b0a


HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d53a10b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d53a10b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d53a10b0

Branch: refs/heads/trunk
Commit: d53a10b0a552155de700e396fd7f450a4c5f9c22
Parents: 692736f
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 13:59:32 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d53a10b0/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
  Evolving
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13746. Still occasional "Should be different group" failure in TestRefreshUserMappings#testGroupMappingRefresh (Contributed by Siyao Meng via Daniel Templeton)

2018-08-16 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 8c0cecb03 -> 27508086d


HDFS-13746. Still occasional "Should be different group" failure in 
TestRefreshUserMappings#testGroupMappingRefresh
(Contributed by Siyao Meng via Daniel Templeton)

Change-Id: I9fad1537ace38367a463d9fe67aaa28d3178fc69
(cherry picked from commit 8512e1a91be3e340d919c7cdc9c09dfb762a6a4e)
(cherry picked from commit fbedf89377e540fb10239a880fc2e01ef7021b93)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27508086
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27508086
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27508086

Branch: refs/heads/branch-3.0
Commit: 27508086de5c82f8b656601fc804ff5d55fed8ad
Parents: 8c0cecb
Author: Daniel Templeton 
Authored: Thu Aug 16 13:43:49 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 16 15:02:18 2018 -0700

--
 .../security/TestRefreshUserMappings.java   | 51 +++-
 1 file changed, 27 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27508086/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index 8a6c21f..d18d2c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -34,7 +34,6 @@ import java.io.UnsupportedEncodingException;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -46,6 +45,8 @@ import 
org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
@@ -53,6 +54,8 @@ import org.junit.Test;
 
 
 public class TestRefreshUserMappings {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  TestRefreshUserMappings.class);
   private MiniDFSCluster cluster;
   Configuration config;
   private static final long groupRefreshTimeoutSec = 1;
@@ -119,42 +122,42 @@ public class TestRefreshUserMappings {
 Groups groups = Groups.getUserToGroupsMappingService(config);
 String user = UserGroupInformation.getCurrentUser().getUserName();
 
-System.out.println("First attempt:");
+LOG.debug("First attempt:");
 List g1 = groups.getGroups(user);
-String [] str_groups = new String [g1.size()];
-g1.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
-
-System.out.println("Second attempt, should be the same:");
+LOG.debug(g1.toString());
+
+LOG.debug("Second attempt, should be the same:");
 List g2 = groups.getGroups(user);
-g2.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
+LOG.debug(g2.toString());
 for(int i=0; i g3 = groups.getGroups(user);
-g3.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
+LOG.debug(g3.toString());
 for(int i=0; i g4 = groups.getGroups(user);
-g4.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
-for(int i=0; i {
+  List g4;
+  try {
+g4 = groups.getGroups(user);
+  } catch (IOException e) {
+return false;
+  }
+  LOG.debug(g4.toString());
+  // if g4 is the same as g3, wait and retry
+  return !g3.equals(g4);
+}, 50, Math.toIntExact(groupRefreshTimeoutSec * 1000 * 30));
   }
-  
+
   @Test
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
 final String SUPER_USER = "super_user";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13746. Still occasional "Should be different group" failure in TestRefreshUserMappings#testGroupMappingRefresh (Contributed by Siyao Meng via Daniel Templeton)

2018-08-16 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 97c193424 -> fbedf8937


HDFS-13746. Still occasional "Should be different group" failure in 
TestRefreshUserMappings#testGroupMappingRefresh
(Contributed by Siyao Meng via Daniel Templeton)

Change-Id: I9fad1537ace38367a463d9fe67aaa28d3178fc69
(cherry picked from commit 8512e1a91be3e340d919c7cdc9c09dfb762a6a4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbedf893
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbedf893
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbedf893

Branch: refs/heads/branch-3.1
Commit: fbedf89377e540fb10239a880fc2e01ef7021b93
Parents: 97c1934
Author: Daniel Templeton 
Authored: Thu Aug 16 13:43:49 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 16 15:01:34 2018 -0700

--
 .../security/TestRefreshUserMappings.java   | 51 +++-
 1 file changed, 27 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbedf893/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index 0e7dfc3..2d7410a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -34,7 +34,6 @@ import java.io.UnsupportedEncodingException;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -46,6 +45,8 @@ import 
org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
@@ -53,6 +54,8 @@ import org.junit.Test;
 
 
 public class TestRefreshUserMappings {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  TestRefreshUserMappings.class);
   private MiniDFSCluster cluster;
   Configuration config;
   private static final long groupRefreshTimeoutSec = 1;
@@ -119,42 +122,42 @@ public class TestRefreshUserMappings {
 Groups groups = Groups.getUserToGroupsMappingService(config);
 String user = UserGroupInformation.getCurrentUser().getUserName();
 
-System.out.println("First attempt:");
+LOG.debug("First attempt:");
 List<String> g1 = groups.getGroups(user);
-String [] str_groups = new String [g1.size()];
-g1.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
-
-System.out.println("Second attempt, should be the same:");
+LOG.debug(g1.toString());
+
+LOG.debug("Second attempt, should be the same:");
 List<String> g2 = groups.getGroups(user);
-g2.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
+LOG.debug(g2.toString());
 for(int i=0; i g3 = groups.getGroups(user);
-g3.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
+LOG.debug(g3.toString());
 for(int i=0; i g4 = groups.getGroups(user);
-g4.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
-for(int i=0; i {
+  List<String> g4;
+  try {
+g4 = groups.getGroups(user);
+  } catch (IOException e) {
+return false;
+  }
+  LOG.debug(g4.toString());
+  // if g4 is the same as g3, wait and retry
+  return !g3.equals(g4);
+}, 50, Math.toIntExact(groupRefreshTimeoutSec * 1000 * 30));
   }
-  
+
   @Test
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
 final String SUPER_USER = "super_user";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13746. Still occasional "Should be different group" failure in TestRefreshUserMappings#testGroupMappingRefresh (Contributed by Siyao Meng via Daniel Templeton)

2018-08-16 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ef29087a -> 8512e1a91


HDFS-13746. Still occasional "Should be different group" failure in 
TestRefreshUserMappings#testGroupMappingRefresh
(Contributed by Siyao Meng via Daniel Templeton)

Change-Id: I9fad1537ace38367a463d9fe67aaa28d3178fc69


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8512e1a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8512e1a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8512e1a9

Branch: refs/heads/trunk
Commit: 8512e1a91be3e340d919c7cdc9c09dfb762a6a4e
Parents: 5ef2908
Author: Daniel Templeton 
Authored: Thu Aug 16 13:43:49 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 16 15:00:45 2018 -0700

--
 .../security/TestRefreshUserMappings.java   | 51 +++-
 1 file changed, 27 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8512e1a9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index 0e7dfc3..2d7410a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -34,7 +34,6 @@ import java.io.UnsupportedEncodingException;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -46,6 +45,8 @@ import 
org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
@@ -53,6 +54,8 @@ import org.junit.Test;
 
 
 public class TestRefreshUserMappings {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  TestRefreshUserMappings.class);
   private MiniDFSCluster cluster;
   Configuration config;
   private static final long groupRefreshTimeoutSec = 1;
@@ -119,42 +122,42 @@ public class TestRefreshUserMappings {
 Groups groups = Groups.getUserToGroupsMappingService(config);
 String user = UserGroupInformation.getCurrentUser().getUserName();
 
-System.out.println("First attempt:");
+LOG.debug("First attempt:");
 List<String> g1 = groups.getGroups(user);
-String [] str_groups = new String [g1.size()];
-g1.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
-
-System.out.println("Second attempt, should be the same:");
+LOG.debug(g1.toString());
+
+LOG.debug("Second attempt, should be the same:");
 List<String> g2 = groups.getGroups(user);
-g2.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
+LOG.debug(g2.toString());
 for(int i=0; i g3 = groups.getGroups(user);
-g3.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
+LOG.debug(g3.toString());
 for(int i=0; i g4 = groups.getGroups(user);
-g4.toArray(str_groups);
-System.out.println(Arrays.toString(str_groups));
-for(int i=0; i {
+  List<String> g4;
+  try {
+g4 = groups.getGroups(user);
+  } catch (IOException e) {
+return false;
+  }
+  LOG.debug(g4.toString());
+  // if g4 is the same as g3, wait and retry
+  return !g3.equals(g4);
+}, 50, Math.toIntExact(groupRefreshTimeoutSec * 1000 * 30));
   }
-  
+
   @Test
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
 final String SUPER_USER = "super_user";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13819. TestDirectoryScanner#testDirectoryScannerInFederatedCluster is flaky

2018-08-14 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 363bd16e3 -> 4a5006b1d


HDFS-13819. TestDirectoryScanner#testDirectoryScannerInFederatedCluster is flaky

Change-Id: I1cea6e67fcec72702ad202775dee3373261ac5cd


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a5006b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a5006b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a5006b1

Branch: refs/heads/trunk
Commit: 4a5006b1d08c19ec096b3936541672ad6a225470
Parents: 363bd16
Author: Daniel Templeton 
Authored: Tue Aug 14 17:03:10 2018 -0700
Committer: Daniel Templeton 
Committed: Tue Aug 14 17:22:15 2018 -0700

--
 .../server/datanode/TestDirectoryScanner.java   | 42 +++-
 1 file changed, 32 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a5006b1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index f792523..893fe20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -40,6 +40,7 @@ import java.util.Random;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.io.FileUtils;
@@ -312,18 +313,29 @@ public class TestDirectoryScanner {
 return id;
   }
 
-  private void scan(long totalBlocks, int diffsize, long missingMetaFile, long 
missingBlockFile,
-  long missingMemoryBlocks, long mismatchBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+  long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks)
+  throws IOException, InterruptedException, TimeoutException {
 scan(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
  missingMemoryBlocks, mismatchBlocks, 0);
   }
 
   private void scan(long totalBlocks, int diffsize, long missingMetaFile,
   long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
-  long duplicateBlocks) throws IOException {
+  long duplicateBlocks)
+  throws IOException, InterruptedException, TimeoutException {
 scanner.reconcile();
-verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
-missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+
+GenericTestUtils.waitFor(() -> {
+  try {
+verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  } catch (AssertionError ex) {
+return false;
+  }
+
+  return true;
+}, 50, 2000);
   }
 
   private void verifyStats(long totalBlocks, int diffsize, long 
missingMetaFile,
@@ -785,7 +797,8 @@ public class TestDirectoryScanner {
 }
   }
 
-  private float runThrottleTest(int blocks) throws IOException {
+  private float runThrottleTest(int blocks)
+  throws IOException, InterruptedException, TimeoutException {
 scanner.setRetainDiffs(true);
 scan(blocks, 0, 0, 0, 0, 0);
 scanner.shutdown();
@@ -1069,10 +1082,19 @@ public class TestDirectoryScanner {
   scanner.setRetainDiffs(true);
   scanner.reconcile();
   //Check blocks in corresponding BP
-  bpid = cluster.getNamesystem(1).getBlockPoolId();
-  verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
-  bpid = cluster.getNamesystem(3).getBlockPoolId();
-  verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+
+  GenericTestUtils.waitFor(() -> {
+try {
+  bpid = cluster.getNamesystem(1).getBlockPoolId();
+  verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+  bpid = cluster.getNamesystem(3).getBlockPoolId();
+  verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+} catch (AssertionError ex) {
+  return false;
+}
+
+return true;
+  }, 50, 2000);
 } finally {
   if (scanner != null) {
 scanner.shutdown();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)

2018-07-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1d8fce0d2 -> 3210b3d8a


HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica
(Contributed by BELUGA BEHR via Daniel Templeton)

Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d
(cherry picked from commit 849c45db187224095b13fe297a4d7377fbb9d2cd)
(cherry picked from commit 00c476abd8f1d34414b646219856859477558458)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3210b3d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3210b3d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3210b3d8

Branch: refs/heads/branch-3.0
Commit: 3210b3d8aa7f1927ac2ef5f2d9e5d83969ae2c48
Parents: 1d8fce0
Author: Daniel Templeton 
Authored: Tue Jul 24 15:34:19 2018 -0700
Committer: Daniel Templeton 
Committed: Tue Jul 24 16:13:30 2018 -0700

--
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  9 ++-
 .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../BlockPlacementPolicyDefault.java|  4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 30 +---
 .../server/namenode/TestFSDirWriteFileOp.java   | 79 
 8 files changed, 134 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3210b3d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 383d65a..c3e088b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -116,7 +116,14 @@ public enum CreateFlag {
* Enforce the file to be a replicated file, no matter what its parent
* directory's replication or erasure coding policy is.
*/
-  SHOULD_REPLICATE((short) 0x80);
+  SHOULD_REPLICATE((short) 0x80),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x100);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3210b3d8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
index 6a0805b..b0686d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
@@ -36,7 +36,16 @@ public enum AddBlockFlag {
*
* @see CreateFlag#NO_LOCAL_WRITE
*/
-  NO_LOCAL_WRITE((short) 0x01);
+  NO_LOCAL_WRITE((short) 0x01),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   *
+   * @see CreateFlag#IGNORE_CLIENT_LOCALITY
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x02);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3210b3d8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9734752..e977054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer
 if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
   this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
 }
+if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) {
+  this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
+}
 if 

hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)

2018-07-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 7e7792dd7 -> 00c476abd


HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica
(Contributed by BELUGA BEHR via Daniel Templeton)

Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d
(cherry picked from commit 849c45db187224095b13fe297a4d7377fbb9d2cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00c476ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00c476ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00c476ab

Branch: refs/heads/branch-3.1
Commit: 00c476abd8f1d34414b646219856859477558458
Parents: 7e7792d
Author: Daniel Templeton 
Authored: Tue Jul 24 15:34:19 2018 -0700
Committer: Daniel Templeton 
Committed: Tue Jul 24 16:12:43 2018 -0700

--
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  9 ++-
 .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../BlockPlacementPolicyDefault.java|  4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 30 +---
 .../server/namenode/TestFSDirWriteFileOp.java   | 79 
 8 files changed, 134 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 383d65a..c3e088b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -116,7 +116,14 @@ public enum CreateFlag {
* Enforce the file to be a replicated file, no matter what its parent
* directory's replication or erasure coding policy is.
*/
-  SHOULD_REPLICATE((short) 0x80);
+  SHOULD_REPLICATE((short) 0x80),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x100);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
index 6a0805b..b0686d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
@@ -36,7 +36,16 @@ public enum AddBlockFlag {
*
* @see CreateFlag#NO_LOCAL_WRITE
*/
-  NO_LOCAL_WRITE((short) 0x01);
+  NO_LOCAL_WRITE((short) 0x01),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   *
+   * @see CreateFlag#IGNORE_CLIENT_LOCALITY
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x02);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9734752..e977054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer
 if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
   this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
 }
+if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) {
+  this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
+}
 if (progress != null) {
   DFSClient.LOG.debug("Set non-null 

hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)

2018-07-24 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6bec03cfc -> 849c45db1


HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica
(Contributed by BELUGA BEHR via Daniel Templeton)

Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/849c45db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/849c45db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/849c45db

Branch: refs/heads/trunk
Commit: 849c45db187224095b13fe297a4d7377fbb9d2cd
Parents: 6bec03c
Author: Daniel Templeton 
Authored: Tue Jul 24 15:34:19 2018 -0700
Committer: Daniel Templeton 
Committed: Tue Jul 24 16:05:27 2018 -0700

--
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  9 ++-
 .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../BlockPlacementPolicyDefault.java|  4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 30 +---
 .../server/namenode/TestFSDirWriteFileOp.java   | 79 
 8 files changed, 134 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 383d65a..c3e088b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -116,7 +116,14 @@ public enum CreateFlag {
* Enforce the file to be a replicated file, no matter what its parent
* directory's replication or erasure coding policy is.
*/
-  SHOULD_REPLICATE((short) 0x80);
+  SHOULD_REPLICATE((short) 0x80),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x100);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
index 6a0805b..b0686d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
@@ -36,7 +36,16 @@ public enum AddBlockFlag {
*
* @see CreateFlag#NO_LOCAL_WRITE
*/
-  NO_LOCAL_WRITE((short) 0x01);
+  NO_LOCAL_WRITE((short) 0x01),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   *
+   * @see CreateFlag#IGNORE_CLIENT_LOCALITY
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x02);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/849c45db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9734752..e977054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer
 if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
   this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
 }
+if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) {
+  this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
+}
 if (progress != null) {
   DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
   +"{}", src);


hadoop git commit: HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2 (Contributed by Haibo Yan via Daniel Templeton)

2018-06-01 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c6becec39 -> f13e01fdf


HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2
(Contributed by Haibo Yan via Daniel Templeton)

Change-Id: I28edde8125dd20d8d270f0e609d1c04d8173c8b7
(cherry picked from commit cba319499822a2475c60c43ea71f8e78237e139f)
(cherry picked from commit 09fd1348e855302f6f238917a98997d935c373c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f13e01fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f13e01fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f13e01fd

Branch: refs/heads/branch-3.0
Commit: f13e01fdfe0e6c2bce5f9615074c7c90fa65d91c
Parents: c6becec
Author: Daniel Templeton 
Authored: Fri Jun 1 14:42:39 2018 -0700
Committer: Daniel Templeton 
Committed: Fri Jun 1 14:45:01 2018 -0700

--
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f13e01fd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index b6c2f19..e83f7f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -1358,8 +1358,11 @@ public final class HttpServer2 implements 
FilterContainer {
 
 if (servletContext.getAttribute(ADMINS_ACL) != null &&
 !userHasAdministratorAccess(servletContext, remoteUser)) {
-  response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
-  + remoteUser + " is unauthorized to access this page.");
+  response.sendError(HttpServletResponse.SC_FORBIDDEN,
+  "Unauthenticated users are not " +
+  "authorized to access this page.");
+  LOG.warn("User " + remoteUser + " is unauthorized to access the page "
+  + request.getRequestURI() + ".");
   return false;
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2 (Contributed by Haibo Yan via Daniel Templeton)

2018-06-01 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 58b5b3aa7 -> 09fd1348e


HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2
(Contributed by Haibo Yan via Daniel Templeton)

Change-Id: I28edde8125dd20d8d270f0e609d1c04d8173c8b7
(cherry picked from commit cba319499822a2475c60c43ea71f8e78237e139f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09fd1348
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09fd1348
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09fd1348

Branch: refs/heads/branch-3.1
Commit: 09fd1348e855302f6f238917a98997d935c373c8
Parents: 58b5b3a
Author: Daniel Templeton 
Authored: Fri Jun 1 14:42:39 2018 -0700
Committer: Daniel Templeton 
Committed: Fri Jun 1 14:44:22 2018 -0700

--
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09fd1348/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 47ca841..3e2063a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -1415,8 +1415,11 @@ public final class HttpServer2 implements 
FilterContainer {
 
 if (servletContext.getAttribute(ADMINS_ACL) != null &&
 !userHasAdministratorAccess(servletContext, remoteUser)) {
-  response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
-  + remoteUser + " is unauthorized to access this page.");
+  response.sendError(HttpServletResponse.SC_FORBIDDEN,
+  "Unauthenticated users are not " +
+  "authorized to access this page.");
+  LOG.warn("User " + remoteUser + " is unauthorized to access the page "
+  + request.getRequestURI() + ".");
   return false;
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2 (Contributed by Haibo Yan via Daniel Templeton)

2018-06-01 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1be05a362 -> cba319499


HDFS-13636. Cross-Site Scripting vulnerability in HttpServer2
(Contributed by Haibo Yan via Daniel Templeton)

Change-Id: I28edde8125dd20d8d270f0e609d1c04d8173c8b7


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba31949
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba31949
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba31949

Branch: refs/heads/trunk
Commit: cba319499822a2475c60c43ea71f8e78237e139f
Parents: 1be05a3
Author: Daniel Templeton 
Authored: Fri Jun 1 14:42:39 2018 -0700
Committer: Daniel Templeton 
Committed: Fri Jun 1 14:42:39 2018 -0700

--
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba31949/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index c273c78..2435671 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -1420,8 +1420,11 @@ public final class HttpServer2 implements 
FilterContainer {
 
 if (servletContext.getAttribute(ADMINS_ACL) != null &&
 !userHasAdministratorAccess(servletContext, remoteUser)) {
-  response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
-  + remoteUser + " is unauthorized to access this page.");
+  response.sendError(HttpServletResponse.SC_FORBIDDEN,
+  "Unauthenticated users are not " +
+  "authorized to access this page.");
+  LOG.warn("User " + remoteUser + " is unauthorized to access the page "
+  + request.getRequestURI() + ".");
   return false;
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently (Contributed by Gabor Bota via Daniel Templeton)

2018-05-02 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c2ed61188 -> 184ecec29


HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed 
intermittently
(Contributed by Gabor Bota via Daniel Templeton)

Change-Id: I9921981dfa69669fe7912dd2a31ae8b638283204
(cherry picked from commit 87c23ef643393c39e8353ca9f495b0c8f97cdbd9)
(cherry picked from commit 57e41154365aa260ad2b2117779a77196c918eb0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/184ecec2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/184ecec2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/184ecec2

Branch: refs/heads/branch-3.0
Commit: 184ecec29cc559be0bcaa35122afffaf159cd23b
Parents: c2ed611
Author: Daniel Templeton 
Authored: Wed May 2 16:54:42 2018 -0700
Committer: Daniel Templeton 
Committed: Wed May 2 17:18:59 2018 -0700

--
 .../hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/184ecec2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
index da85b9b..0f90d82 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
@@ -182,7 +182,8 @@ public class RollingFileSystemSinkTestBase {
 .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
 .add(prefix + ".sink.mysink0.allow-append", allowAppend)
 .add(prefix + ".sink.mysink0.roll-offset-interval-millis", 0)
-.add(prefix + ".sink.mysink0.roll-interval", "1h");
+.add(prefix + ".sink.mysink0.roll-interval", "1h")
+.add("*.queue.capacity", 2);
 
 if (useSecureParams) {
   builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently (Contributed by Gabor Bota via Daniel Templeton)

2018-05-02 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 61fac20c9 -> 57e411543


HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed 
intermittently
(Contributed by Gabor Bota via Daniel Templeton)

Change-Id: I9921981dfa69669fe7912dd2a31ae8b638283204
(cherry picked from commit 87c23ef643393c39e8353ca9f495b0c8f97cdbd9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57e41154
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57e41154
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57e41154

Branch: refs/heads/branch-3.1
Commit: 57e41154365aa260ad2b2117779a77196c918eb0
Parents: 61fac20
Author: Daniel Templeton 
Authored: Wed May 2 16:54:42 2018 -0700
Committer: Daniel Templeton 
Committed: Wed May 2 17:17:46 2018 -0700

--
 .../hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57e41154/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
index da85b9b..0f90d82 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
@@ -182,7 +182,8 @@ public class RollingFileSystemSinkTestBase {
 .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
 .add(prefix + ".sink.mysink0.allow-append", allowAppend)
 .add(prefix + ".sink.mysink0.roll-offset-interval-millis", 0)
-.add(prefix + ".sink.mysink0.roll-interval", "1h");
+.add(prefix + ".sink.mysink0.roll-interval", "1h")
+.add("*.queue.capacity", 2);
 
 if (useSecureParams) {
   builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently (Contributed by Gabor Bota via Daniel Templeton)

2018-05-02 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk f4d280f02 -> 87c23ef64


HDFS-13481. TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed 
intermittently
(Contributed by Gabor Bota via Daniel Templeton)

Change-Id: I9921981dfa69669fe7912dd2a31ae8b638283204


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c23ef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c23ef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c23ef6

Branch: refs/heads/trunk
Commit: 87c23ef643393c39e8353ca9f495b0c8f97cdbd9
Parents: f4d280f
Author: Daniel Templeton 
Authored: Wed May 2 16:54:42 2018 -0700
Committer: Daniel Templeton 
Committed: Wed May 2 17:13:40 2018 -0700

--
 .../hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c23ef6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
index da85b9b..0f90d82 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSinkTestBase.java
@@ -182,7 +182,8 @@ public class RollingFileSystemSinkTestBase {
 .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
 .add(prefix + ".sink.mysink0.allow-append", allowAppend)
 .add(prefix + ".sink.mysink0.roll-offset-interval-millis", 0)
-.add(prefix + ".sink.mysink0.roll-interval", "1h");
+.add(prefix + ".sink.mysink0.roll-interval", "1h")
+.add("*.queue.capacity", 2);
 
 if (useSecureParams) {
   builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7589. TestPBImplRecords fails with NullPointerException. Contributed by Daniel Templeton

2017-12-01 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 60adf0b8b -> 419a83e13


YARN-7589. TestPBImplRecords fails with NullPointerException. Contributed by 
Daniel Templeton

(cherry picked from commit 25df5054216a6a76d09d9c49984f8075ebc6a197)
(cherry picked from commit f222b9d362936621e72c1460d3a9e05e2de4c78b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/419a83e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/419a83e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/419a83e1

Branch: refs/heads/branch-3.0.0
Commit: 419a83e1333eb6d4e66a3334dd4c4af33ef77e34
Parents: 60adf0b
Author: Jason Lowe 
Authored: Fri Dec 1 15:37:36 2017 -0600
Committer: Daniel Templeton 
Committed: Fri Dec 1 14:27:54 2017 -0800

--
 .../org/apache/hadoop/yarn/api/records/Resource.java   |  9 ++---
 .../hadoop/yarn/util/resource/ResourceUtils.java   | 13 +
 2 files changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/419a83e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 5f1455f..2b367bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -100,9 +100,12 @@ public abstract class Resource implements 
Comparable {
   @Stable
   public static Resource newInstance(long memory, int vCores,
   Map others) {
-ResourceInformation[] info = 
ResourceUtils.createResourceTypesArray(others);
-
-return new LightWeightResource(memory, vCores, info);
+if (others != null) {
+  return new LightWeightResource(memory, vCores,
+  ResourceUtils.createResourceTypesArray(others));
+} else {
+  return newInstance(memory, vCores);
+}
   }
 
   @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/419a83e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 04c2412..e4effdb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -345,15 +345,13 @@ public class ResourceUtils {
   }
 
   public static ResourceInformation[] getResourceTypesArray() {
-initializeResourceTypesIfNeeded(null,
-YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+initializeResourceTypesIfNeeded();
 return resourceTypesArray;
   }
 
   public static int getNumberOfKnownResourceTypes() {
 if (numKnownResourceTypes < 0) {
-  initializeResourceTypesIfNeeded(null,
-  YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+  initializeResourceTypesIfNeeded();
 }
 return numKnownResourceTypes;
   }
@@ -364,6 +362,11 @@ public class ResourceUtils {
 YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
   }
 
+  private static void initializeResourceTypesIfNeeded() {
+initializeResourceTypesIfNeeded(null,
+YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+  }
+
   private static void initializeResourceTypesIfNeeded(Configuration conf,
   String resourceFile) {
 if (!initializedResources) {
@@ -674,6 +677,8 @@ public class ResourceUtils {
*/
   public static ResourceInformation[] createResourceTypesArray(Map res) {
+initializeResourceTypesIfNeeded();
+
 ResourceInformation[] info = new ResourceInformation[resourceTypes.size()];
 
 for (Entry entry : RESOURCE_NAME_TO_INDEX.entrySet()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13493. Compatibility Docs should clarify the policy for what takes precedence when a conflict is found (templedf via rkanter)

2017-11-30 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 9de795f12 -> 7dd435cd8


HADOOP-13493. Compatibility Docs should clarify the policy for what takes 
precedence when a conflict is found (templedf via rkanter)

(cherry picked from commit 75a3ab88f5f4ea6abf0a56cb8058e17b5a5fe403)
(cherry picked from commit 2c2ff7da052c35f62dff3ede22bf6c1ddec6c5bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dd435cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dd435cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dd435cd

Branch: refs/heads/branch-3.0.0
Commit: 7dd435cd817bfda7a257c172ef04c6a264b6b6b9
Parents: 9de795f
Author: Robert Kanter <rkan...@apache.org>
Authored: Thu Nov 30 07:39:15 2017 -0800
Committer: Daniel Templeton <templ...@apache.org>
Committed: Thu Nov 30 09:27:12 2017 -0800

--
 .../src/site/markdown/Compatibility.md  | 29 +++-
 1 file changed, 22 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd435cd/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 461ff17..54be412 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -117,13 +117,7 @@ Compatibility types
 
 Developers SHOULD annotate all Hadoop interfaces and classes with the
 @InterfaceAudience and @InterfaceStability annotations to describe the
-intended audience and stability. Annotations may be at the package, class, or
-member variable or method level. Member variable and method annotations SHALL
-override class annotations, and class annotations SHALL override package
-annotations. A package, class, or member variable or method that is not
-annotated SHALL be interpreted as implicitly
-[Private](./InterfaceClassification.html#Private) and
-[Unstable](./InterfaceClassification.html#Unstable).
+intended audience and stability.
 
 * @InterfaceAudience captures the intended audience. Possible values are
 [Public](./InterfaceClassification.html#Public) (for end users and external
@@ -134,6 +128,27 @@ etc.), and 
[Private](./InterfaceClassification.html#Private)
 * @InterfaceStability describes what types of interface changes are permitted. 
Possible values are [Stable](./InterfaceClassification.html#Stable), 
[Evolving](./InterfaceClassification.html#Evolving), and 
[Unstable](./InterfaceClassification.html#Unstable).
 * @Deprecated notes that the package, class, or member variable or method 
could potentially be removed in the future and should not be used.
 
+Annotations MAY be applied at the package, class, or method level. If a method
+has no privacy or stability annotation, it SHALL inherit its intended audience
+or stability level from the class to which it belongs. If a class has no
+privacy or stability annotation, it SHALL inherit its intended audience or
+stability level from the package to which it belongs. If a package has no
+privacy or stability annotation, it SHALL be assumed to be
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable),
+respectively.
+
+In the event that an element's audience or stability annotation conflicts with
+the corresponding annotation of its parent (whether explicit or inherited), the
+element's audience or stability (respectively) SHALL be determined by the
+more restrictive annotation. For example, if a
+[Private](./InterfaceClassification.html#Private) method is contained
+in a [Public](./InterfaceClassification.html#Public) class, then the method
+SHALL be treated as [Private](./InterfaceClassification.html#Private). If a
+[Public](./InterfaceClassification.html#Public) method is contained in a
+[Private](./InterfaceClassification.html#Private) class, the method SHALL be
+treated as [Private](./InterfaceClassification.html#Private).
+
  Use Cases
 
 * 
[Public](./InterfaceClassification.html#Public)-[Stable](./InterfaceClassification.html#Stable)
 API compatibility is required to ensure end-user programs and downstream 
projects continue to work without modification.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7541. Node updates don't update the maximum cluster capability for resources other than CPU and memory

2017-11-30 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2c2ff7da0 -> 40372be7b


YARN-7541. Node updates don't update the maximum cluster capability for 
resources other than CPU and memory

(cherry picked from commit 8498d287cd3beddcf8fe19625227e09982ec4be2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40372be7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40372be7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40372be7

Branch: refs/heads/branch-3.0
Commit: 40372be7bb2b08dcfd94d077233683ddb1edace1
Parents: 2c2ff7d
Author: Daniel Templeton 
Authored: Wed Nov 29 10:36:19 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Nov 30 09:26:25 2017 -0800

--
 .../hadoop/yarn/api/records/Resource.java   |  21 
 .../yarn/util/resource/ResourceUtils.java   |  31 -
 .../scheduler/ClusterNodeTracker.java   |  79 
 .../yarn/server/resourcemanager/MockNodes.java  |   4 +
 .../scheduler/TestClusterNodeTracker.java   | 125 ++-
 5 files changed, 230 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40372be7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index e863d68..5f1455f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.api.records;
 
 import java.util.Arrays;
+import java.util.Map;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -84,6 +85,26 @@ public abstract class Resource implements 
Comparable {
 return new LightWeightResource(memory, vCores);
   }
 
+  /**
+   * Create a new {@link Resource} instance with the given CPU and memory
+   * values and additional resource values as set in the {@code others}
+   * parameter. Note that the CPU and memory settings in the {@code others}
+   * parameter will be ignored.
+   *
+   * @param memory the memory value
+   * @param vCores the CPU value
+   * @param others a map of other resource values indexed by resource name
+   * @return a {@link Resource} instance with the given resource values
+   */
+  @Public
+  @Stable
+  public static Resource newInstance(long memory, int vCores,
+  Map others) {
+ResourceInformation[] info = 
ResourceUtils.createResourceTypesArray(others);
+
+return new LightWeightResource(memory, vCores, info);
+  }
+
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   public static Resource newInstance(Resource resource) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40372be7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index ddd3901..571b73e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -609,7 +609,6 @@ public class ResourceUtils {
 return result;
   }
 
-
   /**
* Reinitialize all resource types from external source (in case of client,
* server will send the updated list and local resourceutils cache will be
@@ -630,4 +629,34 @@ public class ResourceUtils {
 ResourceUtils
 .initializeResourcesFromResourceInformationMap(resourceInformationMap);
   }
+
+  /**
+   * Create an array of {@link ResourceInformation} objects corresponding to
+   * the passed in map of names to values. The array will be ordered according
+   * to the order returned by {@link #getResourceTypesArray()}. The value of
+   * each resource type in the returned array will either be the value given 
for
+   * that resource in the {@code res} parameter or, if none is given, 0.
+   *
+   * @param res the map of resource type values
+   * @return an array of {@link ResourceInformation} instances

hadoop git commit: YARN-7541. Node updates don't update the maximum cluster capability for resources other than CPU and memory

2017-11-30 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 60b35eccf -> 9de795f12


YARN-7541. Node updates don't update the maximum cluster capability for 
resources other than CPU and memory

(cherry picked from commit 8498d287cd3beddcf8fe19625227e09982ec4be2)
(cherry picked from commit 0e73efb1ad68398ce5f7cde51466cea8c2153659)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9de795f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9de795f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9de795f1

Branch: refs/heads/branch-3.0.0
Commit: 9de795f12a203bb4943cb138283e310368a0436f
Parents: 60b35ec
Author: Daniel Templeton 
Authored: Wed Nov 29 10:36:19 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Nov 30 09:26:46 2017 -0800

--
 .../hadoop/yarn/api/records/Resource.java   |  21 
 .../yarn/util/resource/ResourceUtils.java   |  31 -
 .../scheduler/ClusterNodeTracker.java   |  79 
 .../yarn/server/resourcemanager/MockNodes.java  |   4 +
 .../scheduler/TestClusterNodeTracker.java   | 125 ++-
 5 files changed, 230 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9de795f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index e863d68..5f1455f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.api.records;
 
 import java.util.Arrays;
+import java.util.Map;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -84,6 +85,26 @@ public abstract class Resource implements 
Comparable {
 return new LightWeightResource(memory, vCores);
   }
 
+  /**
+   * Create a new {@link Resource} instance with the given CPU and memory
+   * values and additional resource values as set in the {@code others}
+   * parameter. Note that the CPU and memory settings in the {@code others}
+   * parameter will be ignored.
+   *
+   * @param memory the memory value
+   * @param vCores the CPU value
+   * @param others a map of other resource values indexed by resource name
+   * @return a {@link Resource} instance with the given resource values
+   */
+  @Public
+  @Stable
+  public static Resource newInstance(long memory, int vCores,
+  Map others) {
+ResourceInformation[] info = 
ResourceUtils.createResourceTypesArray(others);
+
+return new LightWeightResource(memory, vCores, info);
+  }
+
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   public static Resource newInstance(Resource resource) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9de795f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 540cd9e..04c2412 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -641,7 +641,6 @@ public class ResourceUtils {
 return result;
   }
 
-
   /**
* Reinitialize all resource types from external source (in case of client,
* server will send the updated list and local resourceutils cache will be
@@ -662,4 +661,34 @@ public class ResourceUtils {
 ResourceUtils
 .initializeResourcesFromResourceInformationMap(resourceInformationMap);
   }
+
+  /**
+   * Create an array of {@link ResourceInformation} objects corresponding to
+   * the passed in map of names to values. The array will be ordered according
+   * to the order returned by {@link #getResourceTypesArray()}. The value of
+   * each resource type in the returned array will either be the value given 
for
+   * that resource in the {@code res} parameter or, if none is given, 0.
+   *
+   * @param res the map of resource type values

hadoop git commit: YARN-7541. Node updates don't update the maximum cluster capability for resources other than CPU and memory

2017-11-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 301641811 -> 8498d287c


YARN-7541. Node updates don't update the maximum cluster capability for 
resources other than CPU and memory


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8498d287
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8498d287
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8498d287

Branch: refs/heads/trunk
Commit: 8498d287cd3beddcf8fe19625227e09982ec4be2
Parents: 3016418
Author: Daniel Templeton 
Authored: Wed Nov 29 10:36:19 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 29 11:11:36 2017 -0800

--
 .../hadoop/yarn/api/records/Resource.java   |  24 +++-
 .../yarn/util/resource/ResourceUtils.java   |  29 +
 .../scheduler/ClusterNodeTracker.java   |  79 
 .../yarn/server/resourcemanager/MockNodes.java  |   4 +
 .../scheduler/TestClusterNodeTracker.java   | 125 ++-
 5 files changed, 229 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8498d287/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index abd44b8..b32955b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -21,11 +21,9 @@ package org.apache.hadoop.yarn.api.records;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 
-import com.google.common.collect.Lists;
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.NotImplementedException;
-import org.apache.curator.shaded.com.google.common.reflect.ClassPath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -89,6 +87,26 @@ public abstract class Resource implements 
Comparable {
 return new LightWeightResource(memory, vCores);
   }
 
+  /**
+   * Create a new {@link Resource} instance with the given CPU and memory
+   * values and additional resource values as set in the {@code others}
+   * parameter. Note that the CPU and memory settings in the {@code others}
+   * parameter will be ignored.
+   *
+   * @param memory the memory value
+   * @param vCores the CPU value
+   * @param others a map of other resource values indexed by resource name
+   * @return a {@link Resource} instance with the given resource values
+   */
+  @Public
+  @Stable
+  public static Resource newInstance(long memory, int vCores,
+  Map others) {
+ResourceInformation[] info = 
ResourceUtils.createResourceTypesArray(others);
+
+return new LightWeightResource(memory, vCores, info);
+  }
+
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   public static Resource newInstance(Resource resource) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8498d287/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index c168337..3c6ca98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -629,4 +629,33 @@ public class ResourceUtils {
 return result;
   }
 
+  /**
+   * Create an array of {@link ResourceInformation} objects corresponding to
+   * the passed in map of names to values. The array will be ordered according
+   * to the order returned by {@link #getResourceTypesArray()}. The value of
+   * each resource type in the returned array will either be the value given 
for
+   * that resource in the {@code res} parameter or, if none is given, 0.
+   *
+   * @param res the map of resource type values
+   * @return an array of {@link ResourceInformation} instances
+   */
+  public static 

hadoop git commit: HADOOP-14876. Create downstream developer docs from the compatibility guidelines

2017-11-22 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 6c633e64a -> d7c9b58ec


HADOOP-14876. Create downstream developer docs from the compatibility guidelines

(cherry picked from commit bfd588789a879b8583ea4abd59f4f5843c5ac285)
(cherry picked from commit 4a70e7538678d104b4b11b8889e14f908af10c00)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7c9b58e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7c9b58e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7c9b58e

Branch: refs/heads/branch-3.0.0
Commit: d7c9b58ec737bbe1525fd5b490924c88426c2b3f
Parents: 6c633e6
Author: Daniel Templeton 
Authored: Tue Nov 14 13:19:14 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 22 12:27:37 2017 -0800

--
 .../src/site/markdown/Compatibility.md  | 132 --
 .../src/site/markdown/DownstreamDev.md  | 432 +++
 hadoop-project/src/site/site.xml|   3 +-
 3 files changed, 528 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7c9b58e/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 47fa09a..461ff17 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -63,6 +63,14 @@ when the various labels are appropriate. As a general rule, 
all new interfaces
 and APIs should have the most limited labels (e.g. Private Unstable) that will
 not inhibit the intent of the interface or API.
 
+### Structure
+
+This document is arranged in sections according to the various compatibility
+concerns. Within each section an introductory text explains what compatibility
+means in that section, why it's important, and what the intent to support
+compatibility is. The subsequent "Policy" section then sets forth in specific
+terms what the governing policy is.
+
 ### Notational Conventions
 
 The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
@@ -77,12 +85,18 @@ flagged for removal. The standard meaning of the annotation 
is that the
 API element should not be used and may be removed in a later version.
 
 In all cases removing an element from an API is an incompatible
-change. In the case of [Stable](./InterfaceClassification.html#Stable) APIs,
-the change cannot be made between minor releases within the same major
-version. In addition, to allow consumers of the API time to adapt to the 
change,
-the API element to be removed should be marked as deprecated for a full major
-release before it is removed. For example, if a method is marked as deprecated
-in Hadoop 2.8, it cannot be removed until Hadoop 4.0.
+change. The stability of the element SHALL determine when such a change is
+permissible. A [Stable](./InterfaceClassification.html#Stable) element MUST
+be marked as deprecated for a full major release before it can be removed and
+SHALL NOT be removed in a minor or maintenance release. An
+[Evolving](./InterfaceClassification.html#Evolving) element MUST be marked as
+deprecated for a full minor release before it can be removed and SHALL NOT be
+removed during a maintenance release. An
+[Unstable](./InterfaceClassification.html#Unstable) element MAY be removed at
+any time. When possible an [Unstable](./InterfaceClassification.html#Unstable)
+element SHOULD be marked as deprecated for at least one release before being
+removed. For example, if a method is marked as deprecated in Hadoop 2.8, it
+cannot be removed until Hadoop 4.0.
 
 ### Policy
 
@@ -141,7 +155,7 @@ in hand.
  Semantic compatibility
 
 Apache Hadoop strives to ensure that the behavior of APIs remains consistent
-over versions, though changes for correctness may result in changes in
+across releases, though changes for correctness may result in changes in
 behavior. API behavior SHALL be specified by the JavaDoc API documentation
 where present and complete. When JavaDoc API documentation is not available,
 behavior SHALL be specified by the behavior expected by the related unit tests.
@@ -229,8 +243,8 @@ transports, such as SSL. Upgrading a service from SSLv2 to 
SSLv3 may break
 existing SSLv2 clients. The minimum supported major version of any transports
 MUST not increase across minor releases within a major version.
 
-Service ports are considered as part of the transport mechanism. Fixed
-service port numbers MUST be kept consistent to prevent breaking clients.
+Service ports are considered as part of the transport 

[3/3] hadoop git commit: YARN-7414. FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() (Contributed by Soumabrata Chakraborty via Daniel Templeton)

2017-11-15 Thread templedf
YARN-7414. FairScheduler#getAppWeight() should be moved into 
FSAppAttempt#getWeight()
(Contributed by Soumabrata Chakraborty via Daniel Templeton)

(cherry picked from commit b246c547490dd94271806ca4caf1e5f199f0fb09)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/570bc809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/570bc809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/570bc809

Branch: refs/heads/branch-3.0
Commit: 570bc809019af5d9cc1027fa63f1ee9622673db7
Parents: 5536fa1
Author: Daniel Templeton 
Authored: Wed Nov 15 09:56:37 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 15 10:06:17 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java   | 15 ++-
 .../scheduler/fair/FairScheduler.java  | 17 ++---
 2 files changed, 16 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/570bc809/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index bbd4418..94991eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1304,7 +1304,20 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
   @Override
   public float getWeight() {
-return scheduler.getAppWeight(this);
+double weight = 1.0;
+
+if (scheduler.isSizeBasedWeight()) {
+  scheduler.getSchedulerReadLock().lock();
+
+  try {
+// Set weight based on current memory demand
+weight = Math.log1p(getDemand().getMemorySize()) / Math.log(2);
+  } finally {
+scheduler.getSchedulerReadLock().unlock();
+  }
+}
+
+return (float)weight * this.getPriority().getPriority();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/570bc809/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 7f1b91e..b2978d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -368,21 +368,8 @@ public class FairScheduler extends
 return rmContext.getContainerTokenSecretManager();
   }
 
-  public float getAppWeight(FSAppAttempt app) {
-double weight = 1.0;
-
-if (sizeBasedWeight) {
-  readLock.lock();
-
-  try {
-// Set weight based on current memory demand
-weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2);
-  } finally {
-readLock.unlock();
-  }
-}
-
-return (float)weight * app.getPriority().getPriority();
+  public boolean isSizeBasedWeight() {
+return sizeBasedWeight;
   }
 
   public Resource getIncrementResourceCapability() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HADOOP-14876. Create downstream developer docs from the compatibility guidelines

2017-11-15 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1c8d33428 -> 570bc8090


HADOOP-14876. Create downstream developer docs from the compatibility guidelines

(cherry picked from commit bfd588789a879b8583ea4abd59f4f5843c5ac285)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a70e753
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a70e753
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a70e753

Branch: refs/heads/branch-3.0
Commit: 4a70e7538678d104b4b11b8889e14f908af10c00
Parents: 1c8d334
Author: Daniel Templeton 
Authored: Tue Nov 14 13:19:14 2017 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 14 13:20:27 2017 -0800

--
 .../src/site/markdown/Compatibility.md  | 132 --
 .../src/site/markdown/DownstreamDev.md  | 432 +++
 hadoop-project/src/site/site.xml|   3 +-
 3 files changed, 528 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a70e753/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 47fa09a..461ff17 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -63,6 +63,14 @@ when the various labels are appropriate. As a general rule, 
all new interfaces
 and APIs should have the most limited labels (e.g. Private Unstable) that will
 not inhibit the intent of the interface or API.
 
+### Structure
+
+This document is arranged in sections according to the various compatibility
+concerns. Within each section an introductory text explains what compatibility
+means in that section, why it's important, and what the intent to support
+compatibility is. The subsequent "Policy" section then sets forth in specific
+terms what the governing policy is.
+
 ### Notational Conventions
 
 The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
@@ -77,12 +85,18 @@ flagged for removal. The standard meaning of the annotation 
is that the
 API element should not be used and may be removed in a later version.
 
 In all cases removing an element from an API is an incompatible
-change. In the case of [Stable](./InterfaceClassification.html#Stable) APIs,
-the change cannot be made between minor releases within the same major
-version. In addition, to allow consumers of the API time to adapt to the 
change,
-the API element to be removed should be marked as deprecated for a full major
-release before it is removed. For example, if a method is marked as deprecated
-in Hadoop 2.8, it cannot be removed until Hadoop 4.0.
+change. The stability of the element SHALL determine when such a change is
+permissible. A [Stable](./InterfaceClassification.html#Stable) element MUST
+be marked as deprecated for a full major release before it can be removed and
+SHALL NOT be removed in a minor or maintenance release. An
+[Evolving](./InterfaceClassification.html#Evolving) element MUST be marked as
+deprecated for a full minor release before it can be removed and SHALL NOT be
+removed during a maintenance release. An
+[Unstable](./InterfaceClassification.html#Unstable) element MAY be removed at
+any time. When possible an [Unstable](./InterfaceClassification.html#Unstable)
+element SHOULD be marked as deprecated for at least one release before being
+removed. For example, if a method is marked as deprecated in Hadoop 2.8, it
+cannot be removed until Hadoop 4.0.
 
 ### Policy
 
@@ -141,7 +155,7 @@ in hand.
  Semantic compatibility
 
 Apache Hadoop strives to ensure that the behavior of APIs remains consistent
-over versions, though changes for correctness may result in changes in
+across releases, though changes for correctness may result in changes in
 behavior. API behavior SHALL be specified by the JavaDoc API documentation
 where present and complete. When JavaDoc API documentation is not available,
 behavior SHALL be specified by the behavior expected by the related unit tests.
@@ -229,8 +243,8 @@ transports, such as SSL. Upgrading a service from SSLv2 to 
SSLv3 may break
 existing SSLv2 clients. The minimum supported major version of any transports
 MUST not increase across minor releases within a major version.
 
-Service ports are considered as part of the transport mechanism. Fixed
-service port numbers MUST be kept consistent to prevent breaking clients.
+Service ports are considered as part of the transport mechanism. Default
+service port numbers must be kept consistent to prevent 

[2/3] hadoop git commit: YARN-6953. Clean up ResourceUtils.setMinimumAllocationForMandatoryResources() and setMaximumAllocationForMandatoryResources() (Contributed by Manikandan R via Daniel Templeton

2017-11-15 Thread templedf
YARN-6953. Clean up ResourceUtils.setMinimumAllocationForMandatoryResources() 
and setMaximumAllocationForMandatoryResources()
(Contributed by Manikandan R via Daniel Templeton)

(cherry picked from commit e094eb74b9e7d8c3c6f1990445d248b062cc230b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5536fa1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5536fa1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5536fa1e

Branch: refs/heads/branch-3.0
Commit: 5536fa1e372ac13253bef69158b6bf0c865a70ba
Parents: 4a70e75
Author: Daniel Templeton 
Authored: Wed Nov 15 09:55:40 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 15 10:06:11 2017 -0800

--
 .../yarn/util/resource/ResourceUtils.java   | 108 +++
 1 file changed, 38 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5536fa1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 540cd9e..ddd3901 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -143,74 +143,44 @@ public class ResourceUtils {
 }
   }
 
-  private static void setMinimumAllocationForMandatoryResources(
+  private static void setAllocationForMandatoryResources(
    Map<String, ResourceInformation> res, Configuration conf) {
-String[][] resourceTypesKeys = {
-{ResourceInformation.MEMORY_MB.getName(),
-YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-String.valueOf(
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB),
-ResourceInformation.MEMORY_MB.getName()},
-{ResourceInformation.VCORES.getName(),
-YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
-String.valueOf(
-
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES),
-ResourceInformation.VCORES.getName()}};
-for (String[] arr : resourceTypesKeys) {
-  String resourceTypesKey =
-  YarnConfiguration.RESOURCE_TYPES + "." + arr[0] + MINIMUM_ALLOCATION;
-  long minimumResourceTypes = conf.getLong(resourceTypesKey, -1);
-  long minimumConf = conf.getLong(arr[1], -1);
-  long minimum;
-  if (minimumResourceTypes != -1) {
-minimum = minimumResourceTypes;
-if (minimumConf != -1) {
-  LOG.warn("Using minimum allocation for memory specified in "
-  + "resource-types config file with key "
-  + minimumResourceTypes + ", ignoring minimum specified using "
-  + arr[1]);
-}
-  } else {
-minimum = conf.getLong(arr[1], Long.parseLong(arr[2]));
-  }
-  ResourceInformation ri = res.get(arr[3]);
-  ri.setMinimumAllocation(minimum);
-}
-  }
-
-  private static void setMaximumAllocationForMandatoryResources(
-  Map res, Configuration conf) {
-String[][] resourceTypesKeys = {
-{ResourceInformation.MEMORY_MB.getName(),
-YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-String.valueOf(
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
-ResourceInformation.MEMORY_MB.getName()},
-{ResourceInformation.VCORES.getName(),
-YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
-String.valueOf(
-
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES),
-ResourceInformation.VCORES.getName()}};
-for (String[] arr : resourceTypesKeys) {
-  String resourceTypesKey =
-  YarnConfiguration.RESOURCE_TYPES + "." + arr[0] + MAXIMUM_ALLOCATION;
-  long maximumResourceTypes = conf.getLong(resourceTypesKey, -1);
-  long maximumConf = conf.getLong(arr[1], -1);
-  long maximum;
-  if (maximumResourceTypes != -1) {
-maximum = maximumResourceTypes;
-if (maximumConf != -1) {
-  LOG.warn("Using maximum allocation for memory specified in "
-  + "resource-types config file with key "
-  + maximumResourceTypes + ", ignoring maximum specified using "
-  + arr[1]);
-}
-  } else {
-maximum 

[3/3] hadoop git commit: HADOOP-14876. Create downstream developer docs from the compatibility guidelines

2017-11-15 Thread templedf
HADOOP-14876. Create downstream developer docs from the compatibility guidelines


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69043ba8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69043ba8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69043ba8

Branch: refs/heads/trunk
Commit: 69043ba8b5da2d66a80b6209915da1a0865ca46f
Parents: 2f379d4
Author: Daniel Templeton 
Authored: Tue Nov 14 13:19:14 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 15 10:03:29 2017 -0800

--
 .../src/site/markdown/Compatibility.md  | 132 --
 .../src/site/markdown/DownstreamDev.md  | 432 +++
 hadoop-project/src/site/site.xml|   3 +-
 3 files changed, 528 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69043ba8/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 47fa09a..461ff17 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -63,6 +63,14 @@ when the various labels are appropriate. As a general rule, 
all new interfaces
 and APIs should have the most limited labels (e.g. Private Unstable) that will
 not inhibit the intent of the interface or API.
 
+### Structure
+
+This document is arranged in sections according to the various compatibility
+concerns. Within each section an introductory text explains what compatibility
+means in that section, why it's important, and what the intent to support
+compatibility is. The subsequent "Policy" section then sets forth in specific
+terms what the governing policy is.
+
 ### Notational Conventions
 
 The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
@@ -77,12 +85,18 @@ flagged for removal. The standard meaning of the annotation 
is that the
 API element should not be used and may be removed in a later version.
 
 In all cases removing an element from an API is an incompatible
-change. In the case of [Stable](./InterfaceClassification.html#Stable) APIs,
-the change cannot be made between minor releases within the same major
-version. In addition, to allow consumers of the API time to adapt to the 
change,
-the API element to be removed should be marked as deprecated for a full major
-release before it is removed. For example, if a method is marked as deprecated
-in Hadoop 2.8, it cannot be removed until Hadoop 4.0.
+change. The stability of the element SHALL determine when such a change is
+permissible. A [Stable](./InterfaceClassification.html#Stable) element MUST
+be marked as deprecated for a full major release before it can be removed and
+SHALL NOT be removed in a minor or maintenance release. An
+[Evolving](./InterfaceClassification.html#Evolving) element MUST be marked as
+deprecated for a full minor release before it can be removed and SHALL NOT be
+removed during a maintenance release. An
+[Unstable](./InterfaceClassification.html#Unstable) element MAY be removed at
+any time. When possible an [Unstable](./InterfaceClassification.html#Unstable)
+element SHOULD be marked as deprecated for at least one release before being
+removed. For example, if a method is marked as deprecated in Hadoop 2.8, it
+cannot be removed until Hadoop 4.0.
 
 ### Policy
 
@@ -141,7 +155,7 @@ in hand.
  Semantic compatibility
 
 Apache Hadoop strives to ensure that the behavior of APIs remains consistent
-over versions, though changes for correctness may result in changes in
+across releases, though changes for correctness may result in changes in
 behavior. API behavior SHALL be specified by the JavaDoc API documentation
 where present and complete. When JavaDoc API documentation is not available,
 behavior SHALL be specified by the behavior expected by the related unit tests.
@@ -229,8 +243,8 @@ transports, such as SSL. Upgrading a service from SSLv2 to 
SSLv3 may break
 existing SSLv2 clients. The minimum supported major version of any transports
 MUST not increase across minor releases within a major version.
 
-Service ports are considered as part of the transport mechanism. Fixed
-service port numbers MUST be kept consistent to prevent breaking clients.
+Service ports are considered as part of the transport mechanism. Default
+service port numbers must be kept consistent to prevent breaking clients.
 
  Policy
 
@@ -281,9 +295,8 @@ according to the following:
 * Client-Server compatibility MUST be maintained so as to allow upgrading 

[2/3] hadoop git commit: YARN-6953. Clean up ResourceUtils.setMinimumAllocationForMandatoryResources() and setMaximumAllocationForMandatoryResources() (Contributed by Manikandan R via Daniel Templeton

2017-11-15 Thread templedf
YARN-6953. Clean up ResourceUtils.setMinimumAllocationForMandatoryResources() 
and setMaximumAllocationForMandatoryResources()
(Contributed by Manikandan R via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e094eb74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e094eb74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e094eb74

Branch: refs/heads/trunk
Commit: e094eb74b9e7d8c3c6f1990445d248b062cc230b
Parents: 69043ba
Author: Daniel Templeton 
Authored: Wed Nov 15 09:55:40 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 15 10:03:29 2017 -0800

--
 .../yarn/util/resource/ResourceUtils.java   | 108 +++
 1 file changed, 38 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e094eb74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 3deace8..c168337 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -142,74 +142,44 @@ public class ResourceUtils {
 }
   }
 
-  private static void setMinimumAllocationForMandatoryResources(
+  private static void setAllocationForMandatoryResources(
    Map<String, ResourceInformation> res, Configuration conf) {
-String[][] resourceTypesKeys = {
-{ResourceInformation.MEMORY_MB.getName(),
-YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-String.valueOf(
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB),
-ResourceInformation.MEMORY_MB.getName()},
-{ResourceInformation.VCORES.getName(),
-YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
-String.valueOf(
-
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES),
-ResourceInformation.VCORES.getName()}};
-for (String[] arr : resourceTypesKeys) {
-  String resourceTypesKey =
-  YarnConfiguration.RESOURCE_TYPES + "." + arr[0] + MINIMUM_ALLOCATION;
-  long minimumResourceTypes = conf.getLong(resourceTypesKey, -1);
-  long minimumConf = conf.getLong(arr[1], -1);
-  long minimum;
-  if (minimumResourceTypes != -1) {
-minimum = minimumResourceTypes;
-if (minimumConf != -1) {
-  LOG.warn("Using minimum allocation for memory specified in "
-  + "resource-types config file with key "
-  + minimumResourceTypes + ", ignoring minimum specified using "
-  + arr[1]);
-}
-  } else {
-minimum = conf.getLong(arr[1], Long.parseLong(arr[2]));
-  }
-  ResourceInformation ri = res.get(arr[3]);
-  ri.setMinimumAllocation(minimum);
-}
-  }
-
-  private static void setMaximumAllocationForMandatoryResources(
-  Map res, Configuration conf) {
-String[][] resourceTypesKeys = {
-{ResourceInformation.MEMORY_MB.getName(),
-YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-String.valueOf(
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
-ResourceInformation.MEMORY_MB.getName()},
-{ResourceInformation.VCORES.getName(),
-YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
-String.valueOf(
-
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES),
-ResourceInformation.VCORES.getName()}};
-for (String[] arr : resourceTypesKeys) {
-  String resourceTypesKey =
-  YarnConfiguration.RESOURCE_TYPES + "." + arr[0] + MAXIMUM_ALLOCATION;
-  long maximumResourceTypes = conf.getLong(resourceTypesKey, -1);
-  long maximumConf = conf.getLong(arr[1], -1);
-  long maximum;
-  if (maximumResourceTypes != -1) {
-maximum = maximumResourceTypes;
-if (maximumConf != -1) {
-  LOG.warn("Using maximum allocation for memory specified in "
-  + "resource-types config file with key "
-  + maximumResourceTypes + ", ignoring maximum specified using "
-  + arr[1]);
-}
-  } else {
-maximum = conf.getLong(arr[1], Long.parseLong(arr[2]));
-  }
-  

[1/3] hadoop git commit: YARN-7414. FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() (Contributed by Soumabrata Chakraborty via Daniel Templeton)

2017-11-15 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2f379d412 -> b246c5474


YARN-7414. FairScheduler#getAppWeight() should be moved into 
FSAppAttempt#getWeight()
(Contributed by Soumabrata Chakraborty via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b246c547
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b246c547
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b246c547

Branch: refs/heads/trunk
Commit: b246c547490dd94271806ca4caf1e5f199f0fb09
Parents: e094eb7
Author: Daniel Templeton 
Authored: Wed Nov 15 09:56:37 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 15 10:03:29 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java   | 15 ++-
 .../scheduler/fair/FairScheduler.java  | 17 ++---
 2 files changed, 16 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b246c547/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index bbd4418..94991eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1304,7 +1304,20 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
   @Override
   public float getWeight() {
-return scheduler.getAppWeight(this);
+double weight = 1.0;
+
+if (scheduler.isSizeBasedWeight()) {
+  scheduler.getSchedulerReadLock().lock();
+
+  try {
+// Set weight based on current memory demand
+weight = Math.log1p(getDemand().getMemorySize()) / Math.log(2);
+  } finally {
+scheduler.getSchedulerReadLock().unlock();
+  }
+}
+
+return (float)weight * this.getPriority().getPriority();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b246c547/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 7f1b91e..b2978d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -368,21 +368,8 @@ public class FairScheduler extends
 return rmContext.getContainerTokenSecretManager();
   }
 
-  public float getAppWeight(FSAppAttempt app) {
-double weight = 1.0;
-
-if (sizeBasedWeight) {
-  readLock.lock();
-
-  try {
-// Set weight based on current memory demand
-weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2);
-  } finally {
-readLock.unlock();
-  }
-}
-
-return (float)weight * app.getPriority().getPriority();
+  public boolean isSizeBasedWeight() {
+return sizeBasedWeight;
   }
 
   public Resource getIncrementResourceCapability() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7369. Improve the resource types docs

2017-11-13 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5b55a74ba -> c2f479c24


YARN-7369. Improve the resource types docs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2f479c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2f479c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2f479c2

Branch: refs/heads/branch-3.0
Commit: c2f479c24842fe6f73aaf3786f8732aa97e7ab49
Parents: 5b55a74
Author: Daniel Templeton 
Authored: Mon Nov 13 11:06:04 2017 -0800
Committer: Daniel Templeton 
Committed: Mon Nov 13 11:08:58 2017 -0800

--
 hadoop-project/src/site/site.xml|   2 +-
 .../src/site/markdown/ResourceModel.md  | 217 +++
 .../src/site/markdown/ResourceProfiles.md   |  79 ---
 3 files changed, 218 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f479c2/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 2aa1da7..35aa822 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -128,6 +128,7 @@
   
   
   
+  
   
   
   
@@ -144,7 +145,6 @@
   
   
   
-  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f479c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
new file mode 100644
index 000..ce968ce
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -0,0 +1,217 @@
+
+
+Hadoop: YARN Resource Configuration
+===
+
+Overview
+
+YARN supports an extensible resource model. By default YARN tracks CPU and
+memory for all nodes, applications, and queues, but the resource definition
+can be extended to include arbitrary "countable" resources. A countable
+resource is a resource that is consumed while a container is running, but is
+released afterwards. CPU and memory are both countable resources. Other 
examples
+include GPU resources and software licenses.
+
+Configuration
+-
+
+The following configuration properties are supported. See below for details.
+
+`resource-types.xml`
+
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.resource-types` | Comma-separated list of additional resources. May 
not include `memory`, `memory-mb`, or `vcores` |
+| `yarn.resource-types..units` | Default unit for the specified 
resource type |
+| `yarn.resource-types..minimum` | The minimum request for the 
specified resource type |
+| `yarn.resource-types..maximum` | The maximum request for the 
specified resource type |
+
+`node-resources.xml`
+
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.nodemanager.resource-type.` | The count of the specified 
resource available from the node manager |
+
+Please note that the `resource-types.xml` and `node-resources.xml` files
+also need to be placed in the same configuration directory as `yarn-site.xml` 
if
+they are used. Alternatively, the properties may be placed into the
+`yarn-site.xml` file instead.
+
+YARN Resource Model
+---
+
+### Resource Manager
+The resource manager is the final arbiter of what resources in the cluster are
+tracked. The resource manager loads its resource definition from XML
+configuration files. For example, to define a new resource in addition to
+CPU and memory, the following property should be configured:
+
+```xml
+
+  
+yarn.resource-types
+resource1,resource2
+
+The resources to be used for scheduling. Use resource-types.xml
+to specify details about the individual resource types.
+
+  
+
+```
+
+A valid resource name must begin with a letter and contain only letters, 
numbers,
+and any of: '.', '_', or '-'. A valid resource name may also be optionally
+preceded by a name space followed by a slash. A valid name space consists of
+period-separated groups of letters, numbers, and dashes. For example, the
+following are valid resource names:
+
+* myresource
+* my_resource
+* My-Resource01
+* com.acme/myresource
+
+The following are examples of invalid resource names:
+
+* 10myresource
+* my resource
+* com/acme/myresource
+* $NS/myresource
+* -none-/myresource
+
+For each new resource type defined an optional unit property can be added to
+set the default unit for the 

hadoop git commit: YARN-7369. Improve the resource types docs

2017-11-13 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e512f016 -> 040a38dc4


YARN-7369. Improve the resource types docs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/040a38dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/040a38dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/040a38dc

Branch: refs/heads/trunk
Commit: 040a38dc493adf44e9552b8971acf36188c30152
Parents: 2e512f0
Author: Daniel Templeton 
Authored: Mon Nov 13 11:05:07 2017 -0800
Committer: Daniel Templeton 
Committed: Mon Nov 13 11:05:07 2017 -0800

--
 hadoop-project/src/site/site.xml|   2 +-
 .../src/site/markdown/ResourceModel.md  | 275 +++
 .../src/site/markdown/ResourceProfiles.md   | 116 
 3 files changed, 276 insertions(+), 117 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/040a38dc/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 57cff9a..be48ddb 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -128,6 +128,7 @@
   
   
   
+  
   
   
   
@@ -143,7 +144,6 @@
   
   
   
-  
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/040a38dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
new file mode 100644
index 000..75e5c92
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -0,0 +1,275 @@
+
+
+Hadoop: YARN Resource Configuration
+===
+
+Overview
+
+YARN supports an extensible resource model. By default YARN tracks CPU and
+memory for all nodes, applications, and queues, but the resource definition
+can be extended to include arbitrary "countable" resources. A countable
+resource is a resource that is consumed while a container is running, but is
+released afterwards. CPU and memory are both countable resources. Other 
examples
+include GPU resources and software licenses.
+
+In addition, YARN also supports the use of "resource profiles", which allow a
+user to specify multiple resource requests through a single profile, similar to
+Amazon Web Services Elastic Compute Cluster instance types. For example,
+"large" might mean 8 virtual cores and 16GB RAM.
+
+Configuration
+-
+
+The following configuration properties are supported. See below for details.
+
+`yarn-site.xml`
+
+| Configuration Property | Description |
+|: |: |
+| `yarn.resourcemanager.resource-profiles.enabled` | Indicates whether 
resource profiles support is enabled. Defaults to `false`. |
+
+`resource-types.xml`
+
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.resource-types` | Comma-separated list of additional resources. May 
not include `memory`, `memory-mb`, or `vcores` |
+| `yarn.resource-types..units` | Default unit for the specified 
resource type |
+| `yarn.resource-types..minimum` | The minimum request for the 
specified resource type |
+| `yarn.resource-types..maximum` | The maximum request for the 
specified resource type |
+
+`node-resources.xml`
+
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.nodemanager.resource-type.` | The count of the specified 
resource available from the node manager |
+
+Please note that the `resource-types.xml` and `node-resources.xml` files
+also need to be placed in the same configuration directory as `yarn-site.xml` 
if
+they are used. Alternatively, the properties may be placed into the
+`yarn-site.xml` file instead.
+
+YARN Resource Model
+---
+
+### Resource Manager
+The resource manager is the final arbiter of what resources in the cluster are
+tracked. The resource manager loads its resource definition from XML
+configuration files. For example, to define a new resource in addition to
+CPU and memory, the following property should be configured:
+
+```xml
+
+  
+yarn.resource-types
+resource1,resource2
+
+The resources to be used for scheduling. Use resource-types.xml
+to specify details about the individual resource types.
+
+  
+
+```
+
+A valid resource name must begin with a letter and contain only letters, 
numbers,
+and any of: '.', '_', or '-'. A valid resource name may also be optionally
+preceded by a name space 

hadoop git commit: YARN-7442. [YARN-7069] Limit format of resource type name (Contributed by Wangda Tan via Daniel Templeton)

2017-11-13 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f90f77238 -> 5b55a74ba


YARN-7442. [YARN-7069] Limit format of resource type name (Contributed by 
Wangda Tan via Daniel Templeton)

(cherry picked from commit 2c6213a44280f5b3950167131293ff83f48ff56f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b55a74b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b55a74b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b55a74b

Branch: refs/heads/branch-3.0
Commit: 5b55a74bad2fe02bc41ffc3e13d46468d4c92d22
Parents: f90f772
Author: Daniel Templeton 
Authored: Mon Nov 13 10:37:30 2017 -0800
Committer: Daniel Templeton 
Committed: Mon Nov 13 11:03:56 2017 -0800

--
 .../yarn/api/records/ResourceInformation.java   |  5 +++
 .../yarn/util/resource/ResourceUtils.java   | 26 ++
 .../yarn/util/resource/TestResourceUtils.java   | 37 
 3 files changed, 68 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b55a74b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index cc61d86..e8280ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -55,6 +55,11 @@ public class ResourceInformation implements 
Comparable {
   /**
* Set the name for the resource.
*
+   * A valid resource name must begin with a letter and contain only letters,
+   * numbers, and any of: '.', '_', or '-'. A valid resource name may also be
+   * optionally preceded by a name space followed by a slash. A valid name 
space
+   * consists of period-separated groups of letters, numbers, and dashes."
+   *
* @param rName name for the resource
*/
   public void setName(String rName) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b55a74b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 002a6de..540cd9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -62,6 +62,10 @@ public class ResourceUtils {
   private static final Pattern RESOURCE_REQUEST_VALUE_PATTERN =
   Pattern.compile("^([0-9]+) ?([a-zA-Z]*)$");
 
+  private static final Pattern RESOURCE_NAME_PATTERN = Pattern.compile(
+  "^(((\\p{Alnum}([\\p{Alnum}-]*\\p{Alnum})?\\.)*"
+  + "\\p{Alnum}([\\p{Alnum}-]*\\p{Alnum})?)/)?\\p{Alpha}([\\w.-]*)$");
+
   private static volatile boolean initializedResources = false;
   private static final Map RESOURCE_NAME_TO_INDEX =
   new ConcurrentHashMap();
@@ -210,6 +214,23 @@ public class ResourceUtils {
   }
 
   @VisibleForTesting
+  static void validateNameOfResourceNameAndThrowException(String resourceName)
+  throws YarnRuntimeException {
+Matcher matcher = RESOURCE_NAME_PATTERN.matcher(resourceName);
+if (!matcher.matches()) {
+  String message = String.format(
+  "'%s' is not a valid resource name. A valid resource name must"
+  + " begin with a letter and contain only letters, numbers, "
+  + "and any of: '.', '_', or '-'. A valid resource name may also"
+  + " be optionally preceded by a name space followed by a slash."
+  + " A valid name space consists of period-separated groups of"
+  + " letters, numbers, and dashes.",
+  resourceName);
+  throw new YarnRuntimeException(message);
+}
+  }
+
+  @VisibleForTesting
   static void initializeResourcesMap(Configuration conf) {
 
 Map resourceInformationMap = new HashMap<>();
@@ -247,6 +268,11 @@ public class ResourceUtils {
   }
 }
 
+// Validate names of 

hadoop git commit: YARN-7442. [YARN-7069] Limit format of resource type name (Contributed by Wangda Tan via Daniel Templeton)

2017-11-13 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk fa4b5c669 -> 2e512f016


YARN-7442. [YARN-7069] Limit format of resource type name (Contributed by 
Wangda Tan via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e512f01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e512f01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e512f01

Branch: refs/heads/trunk
Commit: 2e512f016ed689b5afbf1e27fdcd7c9f75b6dc9c
Parents: fa4b5c6
Author: Daniel Templeton 
Authored: Mon Nov 13 10:37:30 2017 -0800
Committer: Daniel Templeton 
Committed: Mon Nov 13 11:03:30 2017 -0800

--
 .../yarn/api/records/ResourceInformation.java   |  5 +++
 .../yarn/util/resource/ResourceUtils.java   | 26 ++
 .../yarn/util/resource/TestResourceUtils.java   | 37 
 3 files changed, 68 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e512f01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 59908ef..67592cc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -65,6 +65,11 @@ public class ResourceInformation implements 
Comparable {
   /**
* Set the name for the resource.
*
+   * A valid resource name must begin with a letter and contain only letters,
+   * numbers, and any of: '.', '_', or '-'. A valid resource name may also be
+   * optionally preceded by a name space followed by a slash. A valid name 
space
+   * consists of period-separated groups of letters, numbers, and dashes."
+   *
* @param rName name for the resource
*/
   public void setName(String rName) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e512f01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 1170c72..3deace8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -62,6 +62,10 @@ public class ResourceUtils {
   private static final Pattern RESOURCE_REQUEST_VALUE_PATTERN =
   Pattern.compile("^([0-9]+) ?([a-zA-Z]*)$");
 
+  private static final Pattern RESOURCE_NAME_PATTERN = Pattern.compile(
+  "^(((\\p{Alnum}([\\p{Alnum}-]*\\p{Alnum})?\\.)*"
+  + "\\p{Alnum}([\\p{Alnum}-]*\\p{Alnum})?)/)?\\p{Alpha}([\\w.-]*)$");
+
   private static volatile boolean initializedResources = false;
   private static final Map RESOURCE_NAME_TO_INDEX =
   new ConcurrentHashMap();
@@ -209,6 +213,23 @@ public class ResourceUtils {
   }
 
   @VisibleForTesting
+  static void validateNameOfResourceNameAndThrowException(String resourceName)
+  throws YarnRuntimeException {
+Matcher matcher = RESOURCE_NAME_PATTERN.matcher(resourceName);
+if (!matcher.matches()) {
+  String message = String.format(
+  "'%s' is not a valid resource name. A valid resource name must"
+  + " begin with a letter and contain only letters, numbers, "
+  + "and any of: '.', '_', or '-'. A valid resource name may also"
+  + " be optionally preceded by a name space followed by a slash."
+  + " A valid name space consists of period-separated groups of"
+  + " letters, numbers, and dashes.",
+  resourceName);
+  throw new YarnRuntimeException(message);
+}
+  }
+
+  @VisibleForTesting
   static void initializeResourcesMap(Configuration conf) {
 
 Map resourceInformationMap = new HashMap<>();
@@ -246,6 +267,11 @@ public class ResourceUtils {
   }
 }
 
+// Validate names of resource information map.
+for (String name : resourceInformationMap.keySet()) {
+ 

hadoop git commit: YARN-7413. Support resource type in SLS (Contributed by Yufei Gu via Daniel Templeton)

2017-11-09 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 462f6c490 -> ba8136615


YARN-7413. Support resource type in SLS (Contributed by Yufei Gu via Daniel 
Templeton)

Change-Id: Ic0a897c123c5d2f57aae757ca6bcf1dad7b90d2b


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba813661
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba813661
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba813661

Branch: refs/heads/trunk
Commit: ba8136615ab66c450884614557eddc6509d63b7c
Parents: 462f6c4
Author: Daniel Templeton 
Authored: Thu Nov 9 12:09:48 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Nov 9 12:09:48 2017 -0800

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 86 +---
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  |  6 +-
 .../yarn/sls/nodemanager/NMSimulator.java   | 15 ++--
 .../src/site/markdown/SchedulerLoadSimulator.md | 48 +++
 .../yarn/sls/nodemanager/TestNMSimulator.java   |  3 +-
 5 files changed, 117 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba813661/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 9d6c3aa..ad4310f 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
@@ -84,6 +85,7 @@ import org.apache.hadoop.yarn.sls.synthetic.SynthJob;
 import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
 import org.apache.hadoop.yarn.util.UTCClock;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -99,7 +101,7 @@ public class SLSRunner extends Configured implements Tool {
 
   // NM simulator
   private HashMap nmMap;
-  private int nmMemoryMB, nmVCores;
+  private Resource nodeManagerResource;
   private String nodeFile;
 
   // AM simulator
@@ -178,6 +180,30 @@ public class SLSRunner extends Configured implements Tool {
 amClassMap.put(amType, Class.forName(tempConf.get(key)));
   }
 }
+
+nodeManagerResource = getNodeManagerResource();
+  }
+
+  private Resource getNodeManagerResource() {
+Resource resource = Resources.createResource(0);
+ResourceInformation[] infors = ResourceUtils.getResourceTypesArray();
+for (ResourceInformation info : infors) {
+  long value;
+  if (info.getName().equals(ResourceInformation.MEMORY_URI)) {
+value = getConf().getInt(SLSConfiguration.NM_MEMORY_MB,
+SLSConfiguration.NM_MEMORY_MB_DEFAULT);
+  } else if (info.getName().equals(ResourceInformation.VCORES_URI)) {
+value = getConf().getInt(SLSConfiguration.NM_VCORES,
+SLSConfiguration.NM_VCORES_DEFAULT);
+  } else {
+value = getConf().getLong(SLSConfiguration.NM_PREFIX +
+info.getName(), SLSConfiguration.NM_RESOURCE_DEFAULT);
+  }
+
+  resource.setResourceValue(info.getName(), value);
+}
+
+return resource;
   }
 
   /**
@@ -261,10 +287,6 @@ public class SLSRunner extends Configured implements Tool {
 
   private void startNM() throws YarnException, IOException {
 // nm configuration
-nmMemoryMB = getConf().getInt(SLSConfiguration.NM_MEMORY_MB,
-SLSConfiguration.NM_MEMORY_MB_DEFAULT);
-nmVCores = getConf().getInt(SLSConfiguration.NM_VCORES,
-SLSConfiguration.NM_VCORES_DEFAULT);
 int heartbeatInterval =
 getConf().getInt(SLSConfiguration.NM_HEARTBEAT_INTERVAL_MS,
 SLSConfiguration.NM_HEARTBEAT_INTERVAL_MS_DEFAULT);
@@ -304,7 +326,7 @@ public class SLSRunner extends Configured implements Tool {
 for (String hostName : nodeSet) {
   // we randomize the heartbeat start time from zero to 1 interval
   NMSimulator nm = new NMSimulator();
-  nm.init(hostName, nmMemoryMB, nmVCores, 

[1/2] hadoop git commit: YARN-7143. FileNotFound handling in ResourceUtils is inconsistent

2017-11-09 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 91a7f8d24 -> 933a09e88


YARN-7143. FileNotFound handling in ResourceUtils is inconsistent

Change-Id: Ib1bb487e14a15edd2b5a42cf5078c5a2b295f069
(cherry picked from commit db82a41d94872cea4d0c1bb1336916cebc2faeec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/933a09e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/933a09e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/933a09e8

Branch: refs/heads/branch-3.0
Commit: 933a09e88684a6631cc79822343f3181b03f0eb0
Parents: 51d3693
Author: Daniel Templeton 
Authored: Thu Nov 9 10:36:49 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Nov 9 11:59:16 2017 -0800

--
 .../yarn/util/resource/ResourceUtils.java   | 56 
 1 file changed, 23 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/933a09e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 48e3af6..0a2ec4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -343,17 +343,14 @@ public class ResourceUtils {
 if (!initializedResources) {
   synchronized (ResourceUtils.class) {
 if (!initializedResources) {
-  if (conf == null) {
-conf = new YarnConfiguration();
-  }
-  try {
-addResourcesFileToConf(resourceFile, conf);
-  } catch (FileNotFoundException fe) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Unable to find '" + resourceFile + "'.");
-}
+  Configuration resConf = conf;
+
+  if (resConf == null) {
+resConf = new YarnConfiguration();
   }
-  initializeResourcesMap(conf);
+
+  addResourcesFileToConf(resourceFile, resConf);
+  initializeResourcesMap(resConf);
 }
   }
 }
@@ -390,7 +387,7 @@ public class ResourceUtils {
   }
 
   private static void addResourcesFileToConf(String resourceFile,
-  Configuration conf) throws FileNotFoundException {
+  Configuration conf) {
 try {
   InputStream ris = getConfInputStream(resourceFile, conf);
   if (LOG.isDebugEnabled()) {
@@ -398,15 +395,11 @@ public class ResourceUtils {
   }
   conf.addResource(ris);
 } catch (FileNotFoundException fe) {
-  throw fe;
-} catch (IOException ie) {
+  LOG.info("Unable to find '" + resourceFile + "'.");
+} catch (IOException | YarnException ex) {
   LOG.fatal("Exception trying to read resource types configuration '"
-  + resourceFile + "'.", ie);
-  throw new YarnRuntimeException(ie);
-} catch (YarnException ye) {
-  LOG.fatal("YARN Exception trying to read resource types configuration '"
-  + resourceFile + "'.", ye);
-  throw new YarnRuntimeException(ye);
+  + resourceFile + "'.", ex);
+  throw new YarnRuntimeException(ex);
 }
   }
 
@@ -468,22 +461,19 @@ public class ResourceUtils {
   private static Map 
initializeNodeResourceInformation(
   Configuration conf) {
 Map nodeResources = new HashMap<>();
-try {
-  addResourcesFileToConf(
-  YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE, conf);
-  for (Map.Entry entry : conf) {
-String key = entry.getKey();
-String value = entry.getValue();
-if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
-  addResourceInformation(key, value, nodeResources);
-}
-  }
-} catch (FileNotFoundException fe) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Couldn't find node resources file: "
-+ YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE);
+
+addResourcesFileToConf(YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE,
+conf);
+
+for (Map.Entry entry : conf) {
+  String key = entry.getKey();
+  String value = entry.getValue();
+
+  if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
+addResourceInformation(key, value, nodeResources);
   }
 }
+
 return nodeResources;
   }
 



[2/2] hadoop git commit: YARN-7205. Log improvements for the ResourceUtils. (Sunil G via wangda)

2017-11-09 Thread templedf
YARN-7205. Log improvements for the ResourceUtils. (Sunil G via wangda)

Change-Id: I0f5b7a7f68ec5d3e1d52211f83fdd089bc0bfd37
(cherry picked from commit 8bcc49e6771ca75f012211e27870a421b19233e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51d3693a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51d3693a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51d3693a

Branch: refs/heads/branch-3.0
Commit: 51d3693a9c7c4d6d2ed0daea4d1165a13fd31a84
Parents: 91a7f8d
Author: Wangda Tan 
Authored: Wed Oct 11 15:25:28 2017 -0700
Committer: Daniel Templeton 
Committed: Thu Nov 9 11:59:16 2017 -0800

--
 .../yarn/api/records/ResourceTypeInfo.java  |  7 ++--
 .../yarn/util/resource/ResourceUtils.java   | 43 
 2 files changed, 30 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51d3693a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
index b6f7f14..8775342 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
@@ -152,9 +152,10 @@ public abstract class ResourceTypeInfo implements 
Comparable {
   @Override
   public String toString() {
 StringBuilder sb = new StringBuilder();
-

hadoop git commit: YARN-7143. FileNotFound handling in ResourceUtils is inconsistent

2017-11-09 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk a1382a18d -> 462f6c490


YARN-7143. FileNotFound handling in ResourceUtils is inconsistent

Change-Id: Ib1bb487e14a15edd2b5a42cf5078c5a2b295f069


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/462f6c49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/462f6c49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/462f6c49

Branch: refs/heads/trunk
Commit: 462f6c490efd2a38a9ba639bcda47b3aa667f650
Parents: a1382a1
Author: Daniel Templeton 
Authored: Thu Nov 9 10:36:49 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Nov 9 11:58:49 2017 -0800

--
 .../yarn/util/resource/ResourceUtils.java   | 56 
 1 file changed, 23 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/462f6c49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index c9cc27b..9c9c0ef8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -342,17 +342,14 @@ public class ResourceUtils {
 if (!initializedResources) {
   synchronized (ResourceUtils.class) {
 if (!initializedResources) {
-  if (conf == null) {
-conf = new YarnConfiguration();
-  }
-  try {
-addResourcesFileToConf(resourceFile, conf);
-  } catch (FileNotFoundException fe) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Unable to find '" + resourceFile + "'.");
-}
+  Configuration resConf = conf;
+
+  if (resConf == null) {
+resConf = new YarnConfiguration();
   }
-  initializeResourcesMap(conf);
+
+  addResourcesFileToConf(resourceFile, resConf);
+  initializeResourcesMap(resConf);
 }
   }
 }
@@ -389,7 +386,7 @@ public class ResourceUtils {
   }
 
   private static void addResourcesFileToConf(String resourceFile,
-  Configuration conf) throws FileNotFoundException {
+  Configuration conf) {
 try {
   InputStream ris = getConfInputStream(resourceFile, conf);
   if (LOG.isDebugEnabled()) {
@@ -397,15 +394,11 @@ public class ResourceUtils {
   }
   conf.addResource(ris);
 } catch (FileNotFoundException fe) {
-  throw fe;
-} catch (IOException ie) {
+  LOG.info("Unable to find '" + resourceFile + "'.");
+} catch (IOException | YarnException ex) {
   LOG.fatal("Exception trying to read resource types configuration '"
-  + resourceFile + "'.", ie);
-  throw new YarnRuntimeException(ie);
-} catch (YarnException ye) {
-  LOG.fatal("YARN Exception trying to read resource types configuration '"
-  + resourceFile + "'.", ye);
-  throw new YarnRuntimeException(ye);
+  + resourceFile + "'.", ex);
+  throw new YarnRuntimeException(ex);
 }
   }
 
@@ -467,22 +460,19 @@ public class ResourceUtils {
  private static Map<String, ResourceInformation>
  initializeNodeResourceInformation(Configuration conf) {
Map<String, ResourceInformation> nodeResources = new HashMap<>();
-try {
-  addResourcesFileToConf(
-  YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE, conf);
-  for (Map.Entry<String, String> entry : conf) {
-String key = entry.getKey();
-String value = entry.getValue();
-if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
-  addResourceInformation(key, value, nodeResources);
-}
-  }
-} catch (FileNotFoundException fe) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Couldn't find node resources file: "
-+ YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE);
+
+addResourcesFileToConf(YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE,
+conf);
+
+for (Map.Entry<String, String> entry : conf) {
+  String key = entry.getKey();
+  String value = entry.getValue();
+
+  if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
+addResourceInformation(key, value, nodeResources);
   }
 }
+
 return nodeResources;
   }
 


-
To unsubscribe, 

hadoop git commit: YARN-7458. TestContainerManagerSecurity is still flakey (Contributed by Robert Kanter via Daniel Templeton)

2017-11-08 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d2b16e3e8 -> 9bcc9e3f4


YARN-7458. TestContainerManagerSecurity is still flakey
(Contributed by Robert Kanter via Daniel Templeton)

Change-Id: Ibb1975ad086c3a33f8af0b4f8b9a13c3cdca3f7d
(cherry picked from commit 49b4c0b334e5472dbbf71b042a6a6b1d4b2ce3b7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bcc9e3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bcc9e3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bcc9e3f

Branch: refs/heads/branch-3.0
Commit: 9bcc9e3f4dbf0349e1bc02a9fdd6b56174d0ddce
Parents: d2b16e3
Author: Daniel Templeton 
Authored: Wed Nov 8 17:31:14 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 8 17:32:34 2017 -0800

--
 .../server/TestContainerManagerSecurity.java| 38 
 1 file changed, 24 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bcc9e3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 1cbad70..ad2f68a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -28,7 +28,9 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
@@ -36,6 +38,7 @@ import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
@@ -49,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -404,27 +408,33 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   newContainerToken, attempt1NMToken, false).isEmpty());
   }
 
-  private void waitForContainerToFinishOnNM(ContainerId containerId) {
+  private void waitForContainerToFinishOnNM(ContainerId containerId)
+  throws TimeoutException, InterruptedException {
 Context nmContext = yarnCluster.getNodeManager(0).getNMContext();
 int interval = 4 * 60; // Max time for container token to expire.
 
-Assert.assertNotNull(nmContext.getContainers().containsKey(containerId));
-
-// Get the container first, as it may be removed from the Context
-// by asynchronous calls.
-// This was leading to a flakey test as otherwise the container could
-// be removed and end up null.
+// If the container is null, then it has already completed and been removed
+// from the Context by asynchronous calls.
 Container waitContainer = nmContext.getContainers().get(containerId);
-
-while ((interval-- > 0)
-&& !waitContainer.cloneAndGetContainerStatus()
-.getState().equals(ContainerState.COMPLETE)) {
+if (waitContainer != null) {
   try {
-LOG.info("Waiting for " + containerId + " to complete.");
-Thread.sleep(1000);
-  } catch (InterruptedException e) {
+LOG.info("Waiting for " + containerId + " to get to state " +
+ContainerState.COMPLETE);
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean 

hadoop git commit: YARN-7458. TestContainerManagerSecurity is still flakey (Contributed by Robert Kanter via Daniel Templeton)

2017-11-08 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0de10680b -> 49b4c0b33


YARN-7458. TestContainerManagerSecurity is still flakey
(Contributed by Robert Kanter via Daniel Templeton)

Change-Id: Ibb1975ad086c3a33f8af0b4f8b9a13c3cdca3f7d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49b4c0b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49b4c0b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49b4c0b3

Branch: refs/heads/trunk
Commit: 49b4c0b334e5472dbbf71b042a6a6b1d4b2ce3b7
Parents: 0de1068
Author: Daniel Templeton 
Authored: Wed Nov 8 17:31:14 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 8 17:31:14 2017 -0800

--
 .../server/TestContainerManagerSecurity.java| 38 
 1 file changed, 24 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49b4c0b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 1cbad70..ad2f68a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -28,7 +28,9 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
@@ -36,6 +38,7 @@ import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
@@ -49,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -404,27 +408,33 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   newContainerToken, attempt1NMToken, false).isEmpty());
   }
 
-  private void waitForContainerToFinishOnNM(ContainerId containerId) {
+  private void waitForContainerToFinishOnNM(ContainerId containerId)
+  throws TimeoutException, InterruptedException {
 Context nmContext = yarnCluster.getNodeManager(0).getNMContext();
 int interval = 4 * 60; // Max time for container token to expire.
 
-Assert.assertNotNull(nmContext.getContainers().containsKey(containerId));
-
-// Get the container first, as it may be removed from the Context
-// by asynchronous calls.
-// This was leading to a flakey test as otherwise the container could
-// be removed and end up null.
+// If the container is null, then it has already completed and been removed
+// from the Context by asynchronous calls.
 Container waitContainer = nmContext.getContainers().get(containerId);
-
-while ((interval-- > 0)
-&& !waitContainer.cloneAndGetContainerStatus()
-.getState().equals(ContainerState.COMPLETE)) {
+if (waitContainer != null) {
   try {
-LOG.info("Waiting for " + containerId + " to complete.");
-Thread.sleep(1000);
-  } catch (InterruptedException e) {
+LOG.info("Waiting for " + containerId + " to get to state " +
+ContainerState.COMPLETE);
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return ContainerState.COMPLETE.equals(
+

hadoop git commit: YARN-7166. Container REST endpoints should report resource types

2017-11-08 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 91e726c07 -> d2b16e3e8


YARN-7166. Container REST endpoints should report resource types

Change-Id: If9c2fe58d4cf758bb6b6cf363dc01f35f8720987
(cherry picked from commit 0de10680b7e5a9dfc85173bcfd338fd3656aa57f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2b16e3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2b16e3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2b16e3e

Branch: refs/heads/branch-3.0
Commit: d2b16e3e8347618c2cce3a76d0fcf62d657b0e93
Parents: 91e726c
Author: Daniel Templeton 
Authored: Wed Nov 8 16:43:49 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 8 16:51:20 2017 -0800

--
 .../yarn/server/webapp/dao/ContainerInfo.java   | 39 +---
 1 file changed, 34 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b16e3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
index 1a5ee85..26a822c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.yarn.server.webapp.dao;
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
@@ -27,6 +30,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Evolving;
 
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.Times;
 
 @Public
@@ -49,20 +54,18 @@ public class ContainerInfo {
   protected ContainerState containerState;
   protected String nodeHttpAddress;
   protected String nodeId;
+  protected Map<String, Long> allocatedResources;
 
   public ContainerInfo() {
 // JAXB needs this
   }
 
   public ContainerInfo(ContainerReport container) {
-containerId = container.getContainerId().toString();
-if (container.getAllocatedResource() != null) {
-  allocatedMB = container.getAllocatedResource().getMemorySize();
-  allocatedVCores = container.getAllocatedResource().getVirtualCores();
-}
 if (container.getAssignedNode() != null) {
   assignedNodeId = container.getAssignedNode().toString();
 }
+
+containerId = container.getContainerId().toString();
 priority = container.getPriority().getPriority();
 startedTime = container.getCreationTime();
 finishedTime = container.getFinishTime();
@@ -73,6 +76,22 @@ public class ContainerInfo {
 containerState = container.getContainerState();
 nodeHttpAddress = container.getNodeHttpAddress();
 nodeId = container.getAssignedNode().toString();
+
+Resource allocated = container.getAllocatedResource();
+
+if (allocated != null) {
+  allocatedMB = allocated.getMemorySize();
+  allocatedVCores = allocated.getVirtualCores();
+
+  // Now populate the allocated resources. This map will include memory
+  // and CPU, because it's where they belong. We still keep allocatedMB
+  // and allocatedVCores so that we don't break the API.
+  allocatedResources = new HashMap<>();
+
+  for (ResourceInformation info : allocated.getResources()) {
+allocatedResources.put(info.getName(), info.getValue());
+  }
+}
   }
 
   public String getContainerId() {
@@ -130,4 +149,14 @@ public class ContainerInfo {
   public String getNodeId() {
 return nodeId;
   }
+
+  /**
+   * Return a map of the allocated resources. The map key is the resource name,
+   * and the value is the resource value.
+   *
+   * @return the allocated resources map
+   */
+  public Map<String, Long> getAllocatedResources() {
+return Collections.unmodifiableMap(allocatedResources);
+  }
 }


-
To 

hadoop git commit: YARN-7166. Container REST endpoints should report resource types

2017-11-08 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk cb35a5958 -> 0de10680b


YARN-7166. Container REST endpoints should report resource types

Change-Id: If9c2fe58d4cf758bb6b6cf363dc01f35f8720987


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de10680
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de10680
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de10680

Branch: refs/heads/trunk
Commit: 0de10680b7e5a9dfc85173bcfd338fd3656aa57f
Parents: cb35a59
Author: Daniel Templeton 
Authored: Wed Nov 8 16:43:49 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 8 16:43:49 2017 -0800

--
 .../yarn/server/webapp/dao/ContainerInfo.java   | 39 +---
 1 file changed, 34 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de10680/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
index 1a5ee85..26a822c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.yarn.server.webapp.dao;
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
@@ -27,6 +30,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Evolving;
 
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.Times;
 
 @Public
@@ -49,20 +54,18 @@ public class ContainerInfo {
   protected ContainerState containerState;
   protected String nodeHttpAddress;
   protected String nodeId;
+  protected Map<String, Long> allocatedResources;
 
   public ContainerInfo() {
 // JAXB needs this
   }
 
   public ContainerInfo(ContainerReport container) {
-containerId = container.getContainerId().toString();
-if (container.getAllocatedResource() != null) {
-  allocatedMB = container.getAllocatedResource().getMemorySize();
-  allocatedVCores = container.getAllocatedResource().getVirtualCores();
-}
 if (container.getAssignedNode() != null) {
   assignedNodeId = container.getAssignedNode().toString();
 }
+
+containerId = container.getContainerId().toString();
 priority = container.getPriority().getPriority();
 startedTime = container.getCreationTime();
 finishedTime = container.getFinishTime();
@@ -73,6 +76,22 @@ public class ContainerInfo {
 containerState = container.getContainerState();
 nodeHttpAddress = container.getNodeHttpAddress();
 nodeId = container.getAssignedNode().toString();
+
+Resource allocated = container.getAllocatedResource();
+
+if (allocated != null) {
+  allocatedMB = allocated.getMemorySize();
+  allocatedVCores = allocated.getVirtualCores();
+
+  // Now populate the allocated resources. This map will include memory
+  // and CPU, because it's where they belong. We still keep allocatedMB
+  // and allocatedVCores so that we don't break the API.
+  allocatedResources = new HashMap<>();
+
+  for (ResourceInformation info : allocated.getResources()) {
+allocatedResources.put(info.getName(), info.getValue());
+  }
+}
   }
 
   public String getContainerId() {
@@ -130,4 +149,14 @@ public class ContainerInfo {
   public String getNodeId() {
 return nodeId;
   }
+
+  /**
+   * Return a map of the allocated resources. The map key is the resource name,
+   * and the value is the resource value.
+   *
+   * @return the allocated resources map
+   */
+  public Map<String, Long> getAllocatedResources() {
+return Collections.unmodifiableMap(allocatedResources);
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional 

hadoop git commit: YARN-7401. Reduce lock contention in ClusterNodeTracker#getClusterCapacity()

2017-11-07 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 91e95dc48 -> 4e7b5824b


YARN-7401. Reduce lock contention in ClusterNodeTracker#getClusterCapacity()

(cherry picked from commit 8db9d61ac2e04888cb228b29fe54b41c730cf0e6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e7b5824
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e7b5824
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e7b5824

Branch: refs/heads/branch-3.0
Commit: 4e7b5824b4aa36267ed0a64cd7aad48760dfe62a
Parents: 91e95dc
Author: Daniel Templeton 
Authored: Tue Nov 7 14:53:48 2017 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 7 14:58:56 2017 -0800

--
 .../scheduler/ClusterNodeTracker.java | 18 ++
 1 file changed, 6 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e7b5824/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index ccec6bc..60ef390 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -55,8 +55,9 @@ public class ClusterNodeTracker {
   private Map nodeNameToNodeMap = new HashMap<>();
   private Map nodesPerRack = new HashMap<>();
 
-  private Resource clusterCapacity = Resources.clone(Resources.none());
-  private Resource staleClusterCapacity = null;
+  private final Resource clusterCapacity = Resources.clone(Resources.none());
+  private volatile Resource staleClusterCapacity =
+  Resources.clone(Resources.none());
 
   // Max allocation
   private long maxNodeMemory = -1;
@@ -82,6 +83,7 @@ public class ClusterNodeTracker {
 
   // Update cluster capacity
   Resources.addTo(clusterCapacity, node.getTotalResource());
+  staleClusterCapacity = Resources.clone(clusterCapacity);
 
   // Update maximumAllocation
   updateMaxResources(node, true);
@@ -139,16 +141,7 @@ public class ClusterNodeTracker {
   }
 
   public Resource getClusterCapacity() {
-readLock.lock();
-try {
-  if (staleClusterCapacity == null ||
-  !Resources.equals(staleClusterCapacity, clusterCapacity)) {
-staleClusterCapacity = Resources.clone(clusterCapacity);
-  }
-  return staleClusterCapacity;
-} finally {
-  readLock.unlock();
-}
+return staleClusterCapacity;
   }
 
   public N removeNode(NodeId nodeId) {
@@ -175,6 +168,7 @@ public class ClusterNodeTracker {
 
   // Update cluster capacity
   Resources.subtractFrom(clusterCapacity, node.getTotalResource());
+  staleClusterCapacity = Resources.clone(clusterCapacity);
 
   // Update maximumAllocation
   updateMaxResources(node, false);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7401. Reduce lock contention in ClusterNodeTracker#getClusterCapacity()

2017-11-07 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13fa2d4e3 -> 8db9d61ac


YARN-7401. Reduce lock contention in ClusterNodeTracker#getClusterCapacity()


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8db9d61a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8db9d61a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8db9d61a

Branch: refs/heads/trunk
Commit: 8db9d61ac2e04888cb228b29fe54b41c730cf0e6
Parents: 13fa2d4
Author: Daniel Templeton 
Authored: Tue Nov 7 14:53:48 2017 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 7 14:53:48 2017 -0800

--
 .../scheduler/ClusterNodeTracker.java | 18 ++
 1 file changed, 6 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8db9d61a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index ccec6bc..60ef390 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -55,8 +55,9 @@ public class ClusterNodeTracker {
   private Map nodeNameToNodeMap = new HashMap<>();
   private Map nodesPerRack = new HashMap<>();
 
-  private Resource clusterCapacity = Resources.clone(Resources.none());
-  private Resource staleClusterCapacity = null;
+  private final Resource clusterCapacity = Resources.clone(Resources.none());
+  private volatile Resource staleClusterCapacity =
+  Resources.clone(Resources.none());
 
   // Max allocation
   private long maxNodeMemory = -1;
@@ -82,6 +83,7 @@ public class ClusterNodeTracker {
 
   // Update cluster capacity
   Resources.addTo(clusterCapacity, node.getTotalResource());
+  staleClusterCapacity = Resources.clone(clusterCapacity);
 
   // Update maximumAllocation
   updateMaxResources(node, true);
@@ -139,16 +141,7 @@ public class ClusterNodeTracker {
   }
 
   public Resource getClusterCapacity() {
-readLock.lock();
-try {
-  if (staleClusterCapacity == null ||
-  !Resources.equals(staleClusterCapacity, clusterCapacity)) {
-staleClusterCapacity = Resources.clone(clusterCapacity);
-  }
-  return staleClusterCapacity;
-} finally {
-  readLock.unlock();
-}
+return staleClusterCapacity;
   }
 
   public N removeNode(NodeId nodeId) {
@@ -175,6 +168,7 @@ public class ClusterNodeTracker {
 
   // Update cluster capacity
   Resources.subtractFrom(clusterCapacity, node.getTotalResource());
+  staleClusterCapacity = Resources.clone(clusterCapacity);
 
   // Update maximumAllocation
   updateMaxResources(node, false);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7146. Many RM unit tests failing with FairScheduler (Contributed by Robert Kanter via Daniel Templeton)

2017-11-01 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 d6a5f8a06 -> 951d5a46f


YARN-7146. Many RM unit tests failing with FairScheduler
(Contributed by Robert Kanter via Daniel Templeton)

(cherry picked from commit 956d814034c98a7e49a759f692fbf5df1ba04e09)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/951d5a46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/951d5a46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/951d5a46

Branch: refs/heads/branch-2.9
Commit: 951d5a46f4504e651abdb90db9a7218a178566a9
Parents: d6a5f8a
Author: Daniel Templeton 
Authored: Wed Nov 1 14:11:26 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Nov 1 14:13:42 2017 -0700

--
 .../yarn/client/api/impl/TestYarnClient.java|  10 +-
 .../scheduler/AbstractYarnScheduler.java|  86 ++
 .../scheduler/fair/FairScheduler.java   |  67 +---
 .../yarn/server/resourcemanager/MockRM.java |   5 +
 .../ParameterizedSchedulerTestBase.java |  66 +---
 .../server/resourcemanager/RMHATestBase.java|   2 +
 .../TestNodeBlacklistingOnAMFailures.java   |  12 ++
 .../yarn/server/resourcemanager/TestRM.java |   6 +
 .../resourcemanager/TestRMAdminService.java |   5 +
 .../server/resourcemanager/TestRMRestart.java   |   5 +
 .../TestWorkPreservingRMRestart.java|   4 +
 .../TestWorkPreservingUnmanagedAM.java  |   4 +-
 .../reservation/TestReservationSystem.java  |   4 +
 .../resourcetracker/TestNMReconnect.java|   5 +
 .../scheduler/TestAbstractYarnScheduler.java|  42 -
 .../TestSchedulingWithAllocationRequestId.java  | 155 ---
 .../fair/TestContinuousScheduling.java  |  11 +-
 .../policy/TestFairOrderingPolicy.java  |   6 +-
 .../security/TestClientToAMTokens.java  |   4 +
 19 files changed, 306 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/951d5a46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 0cac8c0..d993fcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -121,6 +122,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -131,14 +133,16 @@ import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 
 public class TestYarnClient {
 
-  @Test
-  public void test() {
-// More to come later.
+  @Before
+  public void setup() {
+QueueMetrics.clearQueueMetrics();
+DefaultMetricsSystem.setMiniClusterMode(true);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/951d5a46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 

hadoop git commit: YARN-7146. Many RM unit tests failing with FairScheduler (Contributed by Robert Kanter via Daniel Templeton)

2017-11-01 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8180ab436 -> 956d81403


YARN-7146. Many RM unit tests failing with FairScheduler
(Contributed by Robert Kanter via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/956d8140
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/956d8140
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/956d8140

Branch: refs/heads/branch-2
Commit: 956d814034c98a7e49a759f692fbf5df1ba04e09
Parents: 8180ab4
Author: Daniel Templeton 
Authored: Wed Nov 1 14:11:26 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Nov 1 14:11:26 2017 -0700

--
 .../yarn/client/api/impl/TestYarnClient.java|  10 +-
 .../scheduler/AbstractYarnScheduler.java|  86 ++
 .../scheduler/fair/FairScheduler.java   |  67 +---
 .../yarn/server/resourcemanager/MockRM.java |   5 +
 .../ParameterizedSchedulerTestBase.java |  66 +---
 .../server/resourcemanager/RMHATestBase.java|   2 +
 .../TestNodeBlacklistingOnAMFailures.java   |  12 ++
 .../yarn/server/resourcemanager/TestRM.java |   6 +
 .../resourcemanager/TestRMAdminService.java |   5 +
 .../server/resourcemanager/TestRMRestart.java   |   5 +
 .../TestWorkPreservingRMRestart.java|   4 +
 .../TestWorkPreservingUnmanagedAM.java  |   4 +-
 .../reservation/TestReservationSystem.java  |   4 +
 .../resourcetracker/TestNMReconnect.java|   5 +
 .../scheduler/TestAbstractYarnScheduler.java|  42 -
 .../TestSchedulingWithAllocationRequestId.java  | 155 ---
 .../fair/TestContinuousScheduling.java  |  11 +-
 .../policy/TestFairOrderingPolicy.java  |   6 +-
 .../security/TestClientToAMTokens.java  |   4 +
 19 files changed, 306 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/956d8140/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 0cac8c0..d993fcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -121,6 +122,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -131,14 +133,16 @@ import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 
 public class TestYarnClient {
 
-  @Test
-  public void test() {
-// More to come later.
+  @Before
+  public void setup() {
+QueueMetrics.clearQueueMetrics();
+DefaultMetricsSystem.setMiniClusterMode(true);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/956d8140/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 

[05/44] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-10-31 Thread templedf
YARN-4081. Add support for multiple resource types in the Resource class. 
(Varun Vasudev via wangda)

(cherry picked from commit 1bbab7c1570a2438b2fa6da70397dd1d5211a137)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68da5218
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68da5218
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68da5218

Branch: refs/heads/branch-3.0
Commit: 68da5218b74ea4eef645766ac245b7076ba7dcfb
Parents: b5f66b0
Author: Wangda Tan 
Authored: Thu Sep 10 09:43:26 2015 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:15 2017 -0700

--
 .../src/main/resources/META-INF/LICENSE.txt | 1661 ++
 .../src/main/resources/META-INF/NOTICE.txt  |  283 +++
 .../yarn/api/protocolrecords/ResourceTypes.java |   27 +
 .../hadoop/yarn/api/records/Resource.java   |  205 ++-
 .../yarn/api/records/ResourceInformation.java   |  218 +++
 .../exceptions/ResourceNotFoundException.java   |   45 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  197 +++
 .../src/main/proto/yarn_protos.proto|   12 +
 .../yarn/conf/TestResourceInformation.java  |   70 +
 .../yarn/util/TestUnitsConversionUtil.java  |  120 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|   13 +
 .../api/records/impl/pb/ResourcePBImpl.java |  191 +-
 .../hadoop/yarn/util/resource/Resources.java|  137 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |4 +
 14 files changed, 3103 insertions(+), 80 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/44] hadoop git commit: YARN-7397. Reduce lock contention in FairScheduler#getAppWeight()

2017-10-31 Thread templedf
YARN-7397. Reduce lock contention in FairScheduler#getAppWeight()

(cherry picked from commit e62bbbca7adafa0e050212e99c41c95a844700ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab93bf5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab93bf5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab93bf5b

Branch: refs/heads/branch-3.0
Commit: ab93bf5b00fa090d7a78ccc13f0d1529439f0b83
Parents: c818c85
Author: Daniel Templeton 
Authored: Sat Oct 28 09:13:13 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:18 2017 -0700

--
 .../scheduler/fair/FairScheduler.java| 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab93bf5b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 0441859..8ea07ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -369,17 +369,20 @@ public class FairScheduler extends
   }
 
   public float getAppWeight(FSAppAttempt app) {
-try {
+double weight = 1.0;
+
+if (sizeBasedWeight) {
   readLock.lock();
-  double weight = 1.0;
-  if (sizeBasedWeight) {
+
+  try {
 // Set weight based on current memory demand
 weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2);
+  } finally {
+readLock.unlock();
   }
-  return (float)weight * app.getPriority().getPriority();
-} finally {
-  readLock.unlock();
 }
+
+return (float)weight * app.getPriority().getPriority();
   }
 
   public Resource getIncrementResourceCapability() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/44] hadoop git commit: YARN-5586. Update the Resources class to consider all resource types. Contributed by Varun Vasudev.

2017-10-31 Thread templedf
YARN-5586. Update the Resources class to consider all resource types. 
Contributed by Varun Vasudev.

(cherry picked from commit 239c1824a07fede71dd751ed2c1f40036b22170e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d1b59e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d1b59e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d1b59e0

Branch: refs/heads/branch-3.0
Commit: 3d1b59e04a1a7a6a4aaa4102dc58a5c1800de5dc
Parents: feb6d00
Author: Rohith Sharma K S 
Authored: Mon Sep 12 10:44:26 2016 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../resource/DominantResourceCalculator.java|  36 ++--
 .../yarn/util/resource/ResourceUtils.java   |   3 +-
 .../hadoop/yarn/util/resource/Resources.java| 138 +++--
 .../yarn/util/resource/TestResourceUtils.java   |  23 +++
 .../yarn/util/resource/TestResources.java   | 207 +--
 .../resourcemanager/resource/TestResources.java |  43 
 7 files changed, 366 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d1b59e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 5f32648..472e645 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -170,7 +170,9 @@ public class ResourcePBImpl extends Resource {
   resourceInformation.setName(resource);
 }
 initResources();
-resources.put(resource, resourceInformation);
+if (resources.containsKey(resource)) {
+  resources.put(resource, resourceInformation);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d1b59e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 3c4413c..7db1da4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -183,8 +183,10 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 Long requiredResourceValue = UnitsConversionUtil
 .convert(requiredResource.getUnits(), availableResource.getUnits(),
 requiredResource.getValue());
-Long tmp = availableResource.getValue() / requiredResourceValue;
-min = min < tmp ? min : tmp;
+if (requiredResourceValue != 0) {
+  Long tmp = availableResource.getValue() / requiredResourceValue;
+  min = min < tmp ? min : tmp;
+}
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
 "Error getting resource information for " + resource, ye);
@@ -301,10 +303,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 stepFactorResourceInformation.getValue());
-
-tmp.setValue(
-Math.min(roundUp(Math.max(rValue, minimumValue), stepFactorValue),
-maximumValue));
+Long value = Math.max(rValue, minimumValue);
+if (stepFactorValue != 0) {
+  value = roundUp(value, stepFactorValue);
+}
+tmp.setValue(Math.min(value, maximumValue));
 ret.setResourceInformation(resource, tmp);
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
@@ -340,9 +343,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 

[23/44] hadoop git commit: YARN-7039. Fix javac and javadoc errors in YARN-3926 branch. (Sunil G via wangda)

2017-10-31 Thread templedf
http://git-wip-us.apache.org/repos/asf/hadoop/blob/57bbdbeb/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
deleted file mode 100644
index 63fbc9d..000
--- a/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
+++ /dev/null
@@ -1,283 +0,0 @@
-This product includes software developed by The Apache Software
-Foundation (http://www.apache.org/).
-
-The binary distribution of this product bundles binaries of
-org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the
-following notices:
-* Copyright 2011 Dain Sundstrom 
-* Copyright 2011 FuseSource Corp. http://fusesource.com
-
-The binary distribution of this product bundles binaries of
-org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni),
-which has the following notices:
-* This product includes software developed by FuseSource Corp.
-  http://fusesource.com
-* This product includes software developed at
-  Progress Software Corporation and/or its  subsidiaries or affiliates.
-* This product includes software developed by IBM Corporation and others.
-
-The binary distribution of this product bundles binaries of
-AWS Java SDK 1.10.6,
-which has the following notices:
- * This software includes third party software subject to the following
- copyrights: - XML parsing and utility functions from JetS3t - Copyright
- 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org -
- Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility
- functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
-
-The binary distribution of this product bundles binaries of
-Gson 2.2.4,
-which has the following notices:
-
-The Netty Project
-=
-
-Please visit the Netty web site for more information:
-
-  * http://netty.io/
-
-Copyright 2014 The Netty Project
-
-The Netty Project licenses this file to you under the Apache License,
-version 2.0 (the "License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at:
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations
-under the License.
-
-Also, please refer to each LICENSE..txt file, which is located in
-the 'license' directory of the distribution file, for the license terms of the
-components that this product depends on.
-

-This product contains the extensions to Java Collections Framework which has
-been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
-
-  * LICENSE:
-* license/LICENSE.jsr166y.txt (Public Domain)
-  * HOMEPAGE:
-* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
-* 
http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
-
-This product contains a modified version of Robert Harder's Public Domain
-Base64 Encoder and Decoder, which can be obtained at:
-
-  * LICENSE:
-* license/LICENSE.base64.txt (Public Domain)
-  * HOMEPAGE:
-* http://iharder.sourceforge.net/current/java/base64/
-
-This product contains a modified portion of 'Webbit', an event based
-WebSocket and HTTP server, which can be obtained at:
-
-  * LICENSE:
-* license/LICENSE.webbit.txt (BSD License)
-  * HOMEPAGE:
-* https://github.com/joewalnes/webbit
-
-This product contains a modified portion of 'SLF4J', a simple logging
-facade for Java, which can be obtained at:
-
-  * LICENSE:
-* license/LICENSE.slf4j.txt (MIT License)
-  * HOMEPAGE:
-* http://www.slf4j.org/
-
-This product contains a modified portion of 'ArrayDeque', written by Josh
-Bloch of Google, Inc:
-
-  * LICENSE:
-* license/LICENSE.deque.txt (Public Domain)
-
-This product contains a modified portion of 'Apache Harmony', an open source
-Java SE, which can be obtained at:
-
-  * LICENSE:
-* license/LICENSE.harmony.txt (Apache License 2.0)
-  * HOMEPAGE:
-* http://archive.apache.org/dist/harmony/
-
-This product contains a modified version of Roland Kuhn's ASL2
-AbstractNodeQueue, which is based on Dmitriy Vyukov's non-intrusive MPSC queue.
-It can be obtained at:
-
-  * LICENSE:
-* license/LICENSE.abstractnodequeue.txt (Public Domain)
-  * HOMEPAGE:
-* 
https://github.com/akka/akka/blob/wip-2.2.3-for-scala-2.11/akka-actor/src/main/java/akka/dispatch/AbstractNodeQueue.java
-
-This product contains a modified portion of 'jbzip2', a Java bzip2 compression
-and decompression library 

[04/44] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-10-31 Thread templedf
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68da5218/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 000..44880df
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,1661 @@
+
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  "License" shall mean the terms and conditions for use, reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  "Licensor" shall mean the copyright owner or entity authorized by
+  the copyright owner that is granting the License.
+
+  "Legal Entity" shall mean the union of the acting entity and all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  "control" means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  "You" (or "Your") shall mean an individual or Legal Entity
+  exercising permissions granted by this License.
+
+  "Source" form shall mean the preferred form for making modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  "Object" form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  "Work" shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  "Derivative Works" shall mean any work, whether in Source or Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, as a whole, an original work of authorship. For the purposes
+  of this License, Derivative Works shall not include works that remain
+  separable from, or merely link (or bind by name) to the interfaces of,
+  the Work and Derivative Works thereof.
+
+  "Contribution" shall mean any work of authorship, including
+  the original version of the Work and any modifications or additions
+  to that Work or Derivative Works thereof, that is intentionally
+  submitted to Licensor for inclusion in the Work by the copyright owner
+  or by an individual or Legal Entity authorized to submit on behalf of
+  the copyright owner. For the purposes of this definition, "submitted"
+  means any form of electronic, verbal, or written communication sent
+  to the Licensor or its representatives, including but not limited to
+  communication on electronic mailing lists, source code control systems,
+  and issue tracking systems that are managed by, or on behalf of, the
+  Licensor for the purpose of discussing and improving the Work, but
+  excluding communication that is conspicuously marked or otherwise
+  designated in writing by the copyright owner as "Not a Contribution."
+
+  "Contributor" shall mean Licensor and any individual or Legal Entity
+  on behalf of whom a Contribution has been received by Licensor and
+  subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  copyright license to reproduce, prepare Derivative Works of,
+  publicly display, publicly perform, sublicense, and distribute the
+  Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  (except as stated in this section) patent license to make, have made,
+  use, offer to sell, sell, import, and otherwise transfer the Work,
+  where such license applies only to those patent claims licensable
+  by such Contributor that are necessarily infringed by their
+  Contribution(s) alone or by 

[27/44] hadoop git commit: YARN-6933. [YARN-3926] ResourceUtils.DISALLOWED_NAMES check is duplicated. Contributed by Manikandan R.

2017-10-31 Thread templedf
YARN-6933. [YARN-3926] ResourceUtils.DISALLOWED_NAMES check is duplicated. 
Contributed by Manikandan R.

(cherry picked from commit 805095496dfd8e50f71b70cf20845e954d3ba47c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bf29910
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bf29910
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bf29910

Branch: refs/heads/branch-3.0
Commit: 4bf29910a8186609a41c0971c516c5c3b9c954ea
Parents: 00fcc85
Author: Sunil G 
Authored: Wed Sep 6 18:51:14 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../yarn/util/resource/ResourceUtils.java   | 38 ++--
 .../resource-types/resource-types-error-2.xml   |  4 +++
 .../resource-types/resource-types-error-3.xml   |  2 +-
 3 files changed, 23 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bf29910/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 94c2e97..e3e25d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -42,10 +42,8 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
 /**
@@ -63,13 +61,6 @@ public class ResourceUtils {
   private static final String MEMORY = ResourceInformation.MEMORY_MB.getName();
   private static final String VCORES = ResourceInformation.VCORES.getName();
 
-  private static final Set DISALLOWED_NAMES = new HashSet<>();
-  static {
-DISALLOWED_NAMES.add("memory");
-DISALLOWED_NAMES.add(MEMORY);
-DISALLOWED_NAMES.add(VCORES);
-  }
-
   private static volatile boolean initializedResources = false;
   private static final Map RESOURCE_NAME_TO_INDEX =
   new ConcurrentHashMap();
@@ -85,9 +76,21 @@ public class ResourceUtils {
   private ResourceUtils() {
   }
 
-  private static void checkMandatatoryResources(
+  private static void checkMandatoryResources(
   Map resourceInformationMap)
   throws YarnRuntimeException {
+/*
+ * Supporting 'memory' also as invalid resource name, in addition to
+ * 'MEMORY' for historical reasons
+ */
+String key = "memory";
+if (resourceInformationMap.containsKey(key)) {
+  LOG.warn("Attempt to define resource '" + key +
+  "', but it is not allowed.");
+  throw new YarnRuntimeException("Attempt to re-define mandatory resource 
'"
+  + key + "'.");
+}
+
 if (resourceInformationMap.containsKey(MEMORY)) {
   ResourceInformation memInfo = resourceInformationMap.get(MEMORY);
   String memUnits = ResourceInformation.MEMORY_MB.getUnits();
@@ -113,7 +116,7 @@ public class ResourceUtils {
 }
   }
 
-  private static void addManadtoryResources(
+  private static void addMandatoryResources(
   Map res) {
 ResourceInformation ri;
 if (!res.containsKey(MEMORY)) {
@@ -229,11 +232,6 @@ public class ResourceUtils {
   "Incomplete configuration for resource type '" + resourceName
   + "'. One of name, units or type is configured 
incorrectly.");
 }
-if (DISALLOWED_NAMES.contains(resourceName)) {
-  throw new YarnRuntimeException(
-  "Resource type cannot be named '" + resourceName
-  + "'. That name is disallowed.");
-}
 ResourceTypes resourceType = ResourceTypes.valueOf(resourceTypeName);
 LOG.info("Adding resource type - name = " + resourceName + ", units = "
 + resourceUnits + ", type = " + resourceTypeName);
@@ -246,8 +244,8 @@ public class ResourceUtils {
 minimumAllocation, maximumAllocation));
   }
 }
-checkMandatatoryResources(resourceInformationMap);
-addManadtoryResources(resourceInformationMap);
+checkMandatoryResources(resourceInformationMap);
+addMandatoryResources(resourceInformationMap);
 setMinimumAllocationForMandatoryResources(resourceInformationMap, 

[42/44] hadoop git commit: YARN-7374. Improve performance of DRF comparisons for resource types in fair scheduler

2017-10-31 Thread templedf
YARN-7374. Improve performance of DRF comparisons for resource types in fair 
scheduler

(cherry picked from commit 9711b78998ca3a1f7734058a78c7baddd130ce0f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77401022
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77401022
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77401022

Branch: refs/heads/branch-3.0
Commit: 77401022a7a51dd8cc116d26d3c4adab0e826868
Parents: ab93bf5
Author: Daniel Templeton 
Authored: Sun Oct 29 17:45:46 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:18 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |   7 +-
 .../yarn/api/records/ResourceInformation.java   |   1 -
 .../DominantResourceFairnessPolicy.java | 238 +--
 .../TestDominantResourceFairnessPolicy.java | 102 ++--
 4 files changed, 306 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77401022/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 7e8c01d..b03cca1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -66,8 +67,10 @@ public abstract class Resource implements 
Comparable {
   // copy array, etc.
   protected static final int NUM_MANDATORY_RESOURCES = 2;
 
-  protected static final int MEMORY_INDEX = 0;
-  protected static final int VCORES_INDEX = 1;
+  @Private
+  public static final int MEMORY_INDEX = 0;
+  @Private
+  public static final int VCORES_INDEX = 1;
 
   @Public
   @Stable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77401022/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 2a04094..984112a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.api.records;
 
-import org.apache.curator.shaded.com.google.common.reflect.ClassPath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77401022/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index e58b357..59635d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ 

[07/44] hadoop git commit: YARN-6788. [YARN-3926] Improve performance of resource profile branch (Contributed by Sunil Govindan via Daniel Templeton)

2017-10-31 Thread templedf
YARN-6788. [YARN-3926] Improve performance of resource profile branch
(Contributed by Sunil Govindan via Daniel Templeton)

(cherry picked from commit 3aeaafecb823ef6c175ea5d0d9cb726faacaf32d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99c29240
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99c29240
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99c29240

Branch: refs/heads/branch-3.0
Commit: 99c292403d7c49e6ee2c426ff9f59e4575ff8df0
Parents: 58f2c07
Author: Daniel Templeton 
Authored: Fri Aug 4 08:42:34 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|  18 +
 .../hadoop/yarn/api/records/Resource.java   | 234 
 .../yarn/api/records/ResourceInformation.java   |  13 +-
 .../yarn/api/records/impl/BaseResource.java | 133 +
 .../yarn/api/records/impl/package-info.java |  22 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |   8 +-
 .../yarn/util/resource/ResourceUtils.java   | 534 +++
 .../hadoop/yarn/util/resource/package-info.java |  22 +
 .../yarn/api/records/impl/pb/ProtoUtils.java|   5 +-
 .../api/records/impl/pb/ResourcePBImpl.java | 110 ++--
 .../resource/DominantResourceCalculator.java|  67 ++-
 .../yarn/util/resource/ResourceUtils.java   | 488 -
 .../hadoop/yarn/util/resource/Resources.java| 194 ---
 .../yarn/util/resource/TestResourceUtils.java   |  14 +-
 .../yarn/util/resource/TestResources.java   |   7 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java  |  11 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   9 +-
 .../webapp/dao/SchedulerInfo.java   |   3 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 19 files changed, 1031 insertions(+), 862 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99c29240/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index ce7a9c6..a5b4021 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -603,4 +603,22 @@
 
   
 
+  
+
+
+
+
+  
+
+  
+
+
+
+  
+
+  
+
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99c29240/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index fb31745..bbd4c87 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.api.records;
 
+import java.util.Arrays;
+
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -25,13 +27,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.records.impl.BaseResource;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.Records;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * Resource models a set of computer resources in the 
@@ -60,97 +59,49 @@ import java.util.Map;
 @Stable
public abstract class Resource implements Comparable<Resource> {
 
-  private static Resource tmpResource = Records.newRecord(Resource.class);
-
-  private static class SimpleResource extends Resource {
-private long memory;
-private long vcores;
-private Map<String, ResourceInformation> resourceInformationMap;
-
-SimpleResource(long memory, long vcores) {
-  this.memory = memory;
-  this.vcores = vcores;
-
-}
-@Override
-public int getMemory() {
-

[37/44] hadoop git commit: YARN-6612. Update fair scheduler policies to be aware of resource types. (Contributed by Daniel Templeton via Yufei Gu)

2017-10-31 Thread templedf
YARN-6612. Update fair scheduler policies to be aware of resource types. 
(Contributed by Daniel Templeton via Yufei Gu)

(cherry picked from commit 09b476e6dabe8039a41dde7930c8a9c0d14bb750)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f30f78a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f30f78a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f30f78a7

Branch: refs/heads/branch-3.0
Commit: f30f78a7363f54d33c2b4bf80f6c73b5f480a02c
Parents: 74030d8
Author: Yufei Gu 
Authored: Thu Sep 14 11:22:08 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:18 2017 -0700

--
 .../yarn/util/resource/ResourceUtils.java   |   5 +-
 .../resource/ResourceWeights.java   |  72 -
 .../scheduler/fair/AllocationConfiguration.java |  11 +-
 .../fair/AllocationFileLoaderService.java   |   7 +-
 .../scheduler/fair/FSAppAttempt.java|   9 +-
 .../scheduler/fair/FSLeafQueue.java |   3 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   9 +-
 .../scheduler/fair/FairScheduler.java   |   9 +-
 .../scheduler/fair/Schedulable.java |  12 +-
 .../fair/policies/ComputeFairShares.java|  81 ++---
 .../DominantResourceFairnessPolicy.java | 240 +++
 .../fair/policies/FairSharePolicy.java  |  15 +-
 .../TestFairSchedulerPlanFollower.java  |   6 +-
 .../resource/TestResourceWeights.java   |  55 
 .../scheduler/fair/FakeSchedulable.java |  29 +-
 .../scheduler/fair/TestComputeFairShares.java   |  58 ++--
 .../scheduler/fair/TestFairScheduler.java   |  14 +-
 .../scheduler/fair/TestSchedulingPolicy.java|  13 +-
 .../TestDominantResourceFairnessPolicy.java | 304 +++
 19 files changed, 543 insertions(+), 409 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30f78a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 1da5d6a..0564d74 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -344,11 +344,10 @@ public class ResourceUtils {
 addResourcesFileToConf(resourceFile, conf);
 LOG.debug("Found " + resourceFile + ", adding to configuration");
   } catch (FileNotFoundException fe) {
-LOG.info("Unable to find '" + resourceFile
-+ "'. Falling back to memory and vcores as resources.");
+LOG.debug("Unable to find '" + resourceFile + "'.");
   }
-  initializeResourcesMap(conf);
 
+  initializeResourcesMap(conf);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30f78a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
deleted file mode 100644
index b66a5d0..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES 

[17/44] hadoop git commit: YARN-6892. [YARN-3926] Improve API implementation in Resources and DominantResourceCalculator class. Contributed by Sunil G.

2017-10-31 Thread templedf
YARN-6892. [YARN-3926] Improve API implementation in Resources and 
DominantResourceCalculator class. Contributed by Sunil G.

(cherry picked from commit 2b51b262aba0191b80dc93799574c0b959cb4f4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5231c43b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5231c43b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5231c43b

Branch: refs/heads/branch-3.0
Commit: 5231c43bdb974131c14ba1c1c5715aec1a1089f3
Parents: 9c4505f
Author: Sunil G 
Authored: Wed Aug 16 15:25:36 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |  70 +++-
 .../resource/DominantResourceCalculator.java| 317 ---
 .../hadoop/yarn/util/resource/Resources.java|  98 +++---
 3 files changed, 254 insertions(+), 231 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5231c43b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 54f0b18..6c0bed5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -164,7 +164,6 @@ public abstract class Resource implements Comparable<Resource> {
 "This method is implemented by ResourcePBImpl");
   }
 
-
   /**
* Get number of virtual cpu cores of the resource.
* 
@@ -179,7 +178,7 @@ public abstract class Resource implements Comparable<Resource> {
   @Public
   @Evolving
   public abstract int getVirtualCores();
-  
+
   /**
* Set number of virtual cpu cores of the resource.
* 
@@ -225,6 +224,27 @@ public abstract class Resource implements Comparable<Resource> {
   }
 
   /**
+   * Get ResourceInformation for a specified resource from a given index.
+   *
+   * @param index
+   *  of the resource
+   * @return the ResourceInformation object for the resource
+   * @throws ResourceNotFoundException
+   *   if the resource can't be found
+   */
+  @Public
+  @Evolving
+  public ResourceInformation getResourceInformation(int index)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  throw new ResourceNotFoundException("Unknown resource at index '" + index
+  + "'. Valid resources are: " + Arrays.toString(resources));
+}
+return resources[index];
+  }
+
+  /**
* Get the value for a specified resource. No information about the units is
* returned.
*
@@ -264,6 +284,29 @@ public abstract class Resource implements Comparable<Resource> {
   }
 
   /**
+   * Set the ResourceInformation object for a particular resource.
+   *
+   * @param index
+   *  the resource index for which the ResourceInformation is provided
+   * @param resourceInformation
+   *  ResourceInformation object
+   * @throws ResourceNotFoundException
+   *   if the resource is not found
+   */
+  @Public
+  @Evolving
+  public void setResourceInformation(int index,
+  ResourceInformation resourceInformation)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  throw new ResourceNotFoundException("Unknown resource at index '" + index
+  + "'. Valid resources are " + Arrays.toString(resources));
+}
+ResourceInformation.copy(resourceInformation, resources[index]);
+  }
+
+  /**
* Set the value of a resource in the ResourceInformation object. The unit of
* the value is assumed to be the one in the ResourceInformation object.
*
@@ -288,6 +331,29 @@ public abstract class Resource implements Comparable<Resource> {
 storedResourceInfo.setValue(value);
   }
 
+  /**
+   * Set the value of a resource in the ResourceInformation object. The unit of
+   * the value is assumed to be the one in the ResourceInformation object.
+   *
+   * @param index
+   *  the resource index for which the value is provided.
+   * @param value
+   *  the value to set
+   * @throws ResourceNotFoundException
+   *   if the resource is not found
+   */
+  @Public
+  @Evolving
+  public void setResourceValue(int index, long value)
+  throws ResourceNotFoundException {
+ResourceInformation[] 

[39/44] hadoop git commit: YARN-7336. Unsafe cast from long to int Resource.hashCode() method (Contributed by Miklos Szegedi via Daniel Templeton)

2017-10-31 Thread templedf
YARN-7336. Unsafe cast from long to int Resource.hashCode() method
(Contributed by Miklos Szegedi via Daniel Templeton)

(cherry picked from commit d64736d58965722b71d6eade578b6c4c266e6448)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c025c9ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c025c9ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c025c9ac

Branch: refs/heads/branch-3.0
Commit: c025c9ac62efa84ba9ae5958087703eaa235e5e6
Parents: 7689d47
Author: Daniel Templeton 
Authored: Mon Oct 30 12:40:29 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:18 2017 -0700

--
 .../main/java/org/apache/hadoop/yarn/api/records/Resource.java  | 4 ++--
 .../hadoop/yarn/api/records/impl/LightWeightResource.java   | 5 +
 2 files changed, 3 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c025c9ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index b03cca1..1fabae8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -463,11 +463,11 @@ public abstract class Resource implements Comparable<Resource> {
   @Override
   public int hashCode() {
 final int prime = 47;
-long result = 0;
+int result = 0;
 for (ResourceInformation entry : resources) {
   result = prime * result + entry.hashCode();
 }
-return (int) result;
+return result;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c025c9ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index a64d242..7b07bbd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -155,9 +155,6 @@ public class LightWeightResource extends Resource {
   @Override
   public int hashCode() {
 final int prime = 47;
-long result = prime + getMemorySize();
-result = prime * result + getVirtualCores();
-
-return (int) result;
+return prime * (prime + Long.hashCode(getMemorySize())) + 
getVirtualCores();
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/44] hadoop git commit: YARN-7093. Improve log message in ResourceUtils. (Sunil G via wangda)

2017-10-31 Thread templedf
YARN-7093. Improve log message in ResourceUtils. (Sunil G via wangda)

Change-Id: I88928a747ee3eec17bc76fb71e9aaa632d091f0f
(cherry picked from commit 53df3eac503064b8c4cd8eb2b659567622f4d8a7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00fcc859
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00fcc859
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00fcc859

Branch: refs/heads/branch-3.0
Commit: 00fcc8593bdc7ccebeced75d948e0ac204701d4b
Parents: 57bbdbe
Author: Wangda Tan 
Authored: Thu Aug 24 10:44:28 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00fcc859/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index ed16104..94c2e97 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -353,7 +353,7 @@ public class ResourceUtils {
 initializedResources = true;
   } catch (FileNotFoundException fe) {
 LOG.info("Unable to find '" + resourceFile
-+ "'. Falling back to memory and vcores as resources", fe);
++ "'. Falling back to memory and vcores as resources.");
 initializeResourcesMap(conf);
 initializedResources = true;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/44] hadoop git commit: YARN-4830. Add support for resource types in the nodemanager. Contributed by Varun Vasudev.

2017-10-31 Thread templedf
YARN-4830. Add support for resource types in the nodemanager. Contributed by 
Varun Vasudev.

(cherry picked from commit 759114b0063907d4c07ea6ee261e861bf5cc3a9a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16beac12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16beac12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16beac12

Branch: refs/heads/branch-3.0
Commit: 16beac12ad64ecb470b72e08c285a664b44319b9
Parents: 92ca475
Author: Varun Vasudev 
Authored: Sat Jun 11 14:33:46 2016 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |   3 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  19 ++-
 .../FileSystemBasedConfigurationProvider.java   |   3 +-
 .../hadoop/yarn/LocalConfigurationProvider.java |   3 +-
 .../api/records/impl/pb/ResourcePBImpl.java |  53 +++---
 .../yarn/util/resource/ResourceUtils.java   | 168 +++
 .../yarn/util/resource/TestResourceUtils.java   |  29 +++-
 .../resource-types/node-resources-1.xml |  29 
 .../resource-types/node-resources-2.xml |  39 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../util/NodeManagerHardwareUtils.java  |  52 ++
 .../resourcemanager/ResourceTrackerService.java |   9 +-
 12 files changed, 342 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16beac12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index f8d250b..1de7b2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -308,7 +308,8 @@ public abstract class Resource implements Comparable<Resource> {
 continue;
   }
   if (entry.getKey().equals(ResourceInformation.VCORES.getName())
-  && entry.getValue().getUnits().equals("")) {
+  && entry.getValue().getUnits()
+  .equals(ResourceInformation.VCORES.getUnits())) {
 continue;
   }
   sb.append(", ").append(entry.getKey()).append(": ")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16beac12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c85175a..6679ca7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -65,6 +65,10 @@ public class YarnConfiguration extends Configuration {
   "resource-types.xml";
 
   @Private
+  public static final String NODE_RESOURCES_CONFIGURATION_FILE =
+  "node-resources.xml";
+
+  @Private
  public static final List<String> RM_CONFIGURATION_FILES =
   Collections.unmodifiableList(Arrays.asList(
   RESOURCE_TYPES_CONFIGURATION_FILE,
@@ -74,6 +78,16 @@ public class YarnConfiguration extends Configuration {
   YARN_SITE_CONFIGURATION_FILE,
   CORE_SITE_CONFIGURATION_FILE));
 
+  @Private
+  public static final List<String> NM_CONFIGURATION_FILES =
+  Collections.unmodifiableList(Arrays.asList(
+  NODE_RESOURCES_CONFIGURATION_FILE,
+  DR_CONFIGURATION_FILE,
+  CS_CONFIGURATION_FILE,
+  HADOOP_POLICY_CONFIGURATION_FILE,
+  YARN_SITE_CONFIGURATION_FILE,
+  CORE_SITE_CONFIGURATION_FILE));
+
   @Evolving
   public static final int APPLICATION_MAX_TAGS = 10;
 
@@ -112,12 +126,15 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_PREFIX = "yarn.";
 
   /
-  // Scheduler resource types configs
+  // Resource types configs
   
 
   public static final String RESOURCE_TYPES =
   YarnConfiguration.YARN_PREFIX + "resource-types";
 
+  public static final String NM_RESOURCES_PREFIX =
+  YarnConfiguration.NM_PREFIX + 

[38/44] hadoop git commit: YARN-6984. DominantResourceCalculator.isAnyMajorResourceZero() should test all resources (Contributed by Sunil G via Daniel Templeton)

2017-10-31 Thread templedf
YARN-6984. DominantResourceCalculator.isAnyMajorResourceZero() should test all 
resources
(Contributed by Sunil G via Daniel Templeton)

(cherry picked from commit 679f99b1455a1fcd785aef8ddc705f63cf0e6518)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c818c852
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c818c852
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c818c852

Branch: refs/heads/branch-3.0
Commit: c818c85238b91407e41f3b29a888394170ff447a
Parents: 6858515
Author: Daniel Templeton 
Authored: Tue Oct 24 10:04:36 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:18 2017 -0700

--
 .../yarn/util/resource/DominantResourceCalculator.java| 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c818c852/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index ca828a5..6b284e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -557,6 +557,14 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 
   @Override
   public boolean isAnyMajorResourceZero(Resource resource) {
-return resource.getMemorySize() == 0f || resource.getVirtualCores() == 0;
+int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+for (int i = 0; i < maxLength; i++) {
+  ResourceInformation resourceInformation = resource
+  .getResourceInformation(i);
+  if (resourceInformation.getValue() == 0L) {
+return true;
+  }
+}
+return false;
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/44] hadoop git commit: YARN-7039. Fix javac and javadoc errors in YARN-3926 branch. (Sunil G via wangda)

2017-10-31 Thread templedf
YARN-7039. Fix javac and javadoc errors in YARN-3926 branch. (Sunil G via 
wangda)

Change-Id: I442bf6d838b3aba83f1f6779cf9dcf8596a2102d
(cherry picked from commit e490602e9b306d5b8a543b93fb15a7395bb9a03d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57bbdbeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57bbdbeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57bbdbeb

Branch: refs/heads/branch-3.0
Commit: 57bbdbeb8058d9d3176b431bdd1665bd9c8eee29
Parents: 5dd008c
Author: Wangda Tan 
Authored: Tue Aug 22 16:18:01 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../src/main/resources/META-INF/LICENSE.txt | 1661 --
 .../src/main/resources/META-INF/NOTICE.txt  |  283 ---
 .../records/ApplicationResourceUsageReport.java |1 -
 .../yarn/api/records/ResourceTypeInfo.java  |6 +-
 .../yarn/api/records/impl/package-info.java |4 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |5 +-
 .../yarn/util/resource/ResourceUtils.java   |4 +-
 .../hadoop/yarn/util/resource/package-info.java |4 +
 .../yarn/conf/TestResourceInformation.java  |3 +
 .../yarn/util/TestUnitsConversionUtil.java  |8 +-
 .../applications/distributedshell/Client.java   |1 +
 .../pb/GetAllResourceTypeInfoRequestPBImpl.java |3 +-
 .../ApplicationResourceUsageReportPBImpl.java   |1 -
 .../records/impl/pb/ResourceTypeInfoPBImpl.java |2 -
 .../hadoop/yarn/util/resource/Resources.java|6 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |4 -
 .../yarn/util/resource/TestResourceUtils.java   |   47 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |8 +-
 .../util/NodeManagerHardwareUtils.java  |   36 +-
 .../util/TestNodeManagerHardwareUtils.java  |2 +-
 20 files changed, 78 insertions(+), 2011 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/44] hadoop git commit: YARN-7039. Fix javac and javadoc errors in YARN-3926 branch. (Sunil G via wangda)

2017-10-31 Thread templedf
http://git-wip-us.apache.org/repos/asf/hadoop/blob/57bbdbeb/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
deleted file mode 100644
index 44880df..000
--- a/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
+++ /dev/null
@@ -1,1661 +0,0 @@
-
- Apache License
-   Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-  "License" shall mean the terms and conditions for use, reproduction,
-  and distribution as defined by Sections 1 through 9 of this document.
-
-  "Licensor" shall mean the copyright owner or entity authorized by
-  the copyright owner that is granting the License.
-
-  "Legal Entity" shall mean the union of the acting entity and all
-  other entities that control, are controlled by, or are under common
-  control with that entity. For the purposes of this definition,
-  "control" means (i) the power, direct or indirect, to cause the
-  direction or management of such entity, whether by contract or
-  otherwise, or (ii) ownership of fifty percent (50%) or more of the
-  outstanding shares, or (iii) beneficial ownership of such entity.
-
-  "You" (or "Your") shall mean an individual or Legal Entity
-  exercising permissions granted by this License.
-
-  "Source" form shall mean the preferred form for making modifications,
-  including but not limited to software source code, documentation
-  source, and configuration files.
-
-  "Object" form shall mean any form resulting from mechanical
-  transformation or translation of a Source form, including but
-  not limited to compiled object code, generated documentation,
-  and conversions to other media types.
-
-  "Work" shall mean the work of authorship, whether in Source or
-  Object form, made available under the License, as indicated by a
-  copyright notice that is included in or attached to the work
-  (an example is provided in the Appendix below).
-
-  "Derivative Works" shall mean any work, whether in Source or Object
-  form, that is based on (or derived from) the Work and for which the
-  editorial revisions, annotations, elaborations, or other modifications
-  represent, as a whole, an original work of authorship. For the purposes
-  of this License, Derivative Works shall not include works that remain
-  separable from, or merely link (or bind by name) to the interfaces of,
-  the Work and Derivative Works thereof.
-
-  "Contribution" shall mean any work of authorship, including
-  the original version of the Work and any modifications or additions
-  to that Work or Derivative Works thereof, that is intentionally
-  submitted to Licensor for inclusion in the Work by the copyright owner
-  or by an individual or Legal Entity authorized to submit on behalf of
-  the copyright owner. For the purposes of this definition, "submitted"
-  means any form of electronic, verbal, or written communication sent
-  to the Licensor or its representatives, including but not limited to
-  communication on electronic mailing lists, source code control systems,
-  and issue tracking systems that are managed by, or on behalf of, the
-  Licensor for the purpose of discussing and improving the Work, but
-  excluding communication that is conspicuously marked or otherwise
-  designated in writing by the copyright owner as "Not a Contribution."
-
-  "Contributor" shall mean Licensor and any individual or Legal Entity
-  on behalf of whom a Contribution has been received by Licensor and
-  subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  copyright license to reproduce, prepare Derivative Works of,
-  publicly display, publicly perform, sublicense, and distribute the
-  Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  (except as stated in this section) patent license to make, have made,
-  use, offer to sell, sell, import, and otherwise transfer the Work,
-  where such license applies only to those patent claims licensable
-  by such Contributor that are necessarily infringed by their
-  Contribution(s) alone or 

[30/44] hadoop git commit: YARN-7136. Additional Performance Improvement for Resource Profile Feature (Contributed by Wangda Tan via Daniel Templeton)

2017-10-31 Thread templedf
YARN-7136. Additional Performance Improvement for Resource Profile Feature
(Contributed by Wangda Tan via Daniel Templeton)

(cherry picked from commit bf2b687412f9a830ec4834477ccf25dbe76fddcd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0db9ddb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0db9ddb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0db9ddb7

Branch: refs/heads/branch-3.0
Commit: 0db9ddb7284e3b585191397894c60726c16183b6
Parents: 335e3a6
Author: Daniel Templeton 
Authored: Mon Sep 11 14:17:57 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|   2 +-
 .../hadoop/yarn/api/records/Resource.java   | 178 +++--
 .../yarn/api/records/ResourceInformation.java   |  16 +-
 .../yarn/api/records/impl/BaseResource.java | 137 --
 .../api/records/impl/LightWeightResource.java   | 163 
 .../yarn/util/resource/ResourceUtils.java   |  23 +-
 .../api/records/impl/pb/ResourcePBImpl.java |  19 +-
 .../resource/DominantResourceCalculator.java|  75 +++---
 .../hadoop/yarn/util/resource/Resources.java|  30 ++-
 .../yarn/util/resource/TestResourceUtils.java   |   2 +
 .../yarn/server/resourcemanager/MockRM.java |   6 +-
 .../capacity/TestCapacityScheduler.java | 137 --
 .../capacity/TestCapacitySchedulerPerf.java | 265 +++
 .../hadoop/yarn/server/MiniYARNCluster.java |   7 +-
 14 files changed, 623 insertions(+), 437 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0db9ddb7/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index a5b4021..2aa9a5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -617,7 +617,7 @@
   
 
   
-
+
 
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0db9ddb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index f3a5bc2..37b50f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.api.records.impl.BaseResource;
+import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -59,8 +59,15 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 @Stable
public abstract class Resource implements Comparable<Resource> {
 
-  protected static final String MEMORY = 
ResourceInformation.MEMORY_MB.getName();
-  protected static final String VCORES = ResourceInformation.VCORES.getName();
+  protected ResourceInformation[] resources = null;
+
+  // Number of mandatory resources, this is added to avoid invoke
+  // MandatoryResources.values().length, since values() internally will
+  // copy array, etc.
+  protected static final int NUM_MANDATORY_RESOURCES = 2;
+
+  protected static final int MEMORY_INDEX = 0;
+  protected static final int VCORES_INDEX = 1;
 
   @Public
   @Stable
@@ -71,7 +78,7 @@ public abstract class Resource implements 
Comparable<Resource> {
   ret.setVirtualCores(vCores);
   return ret;
 }
-return new BaseResource(memory, vCores);
+return new LightWeightResource(memory, vCores);
   }
 
   @Public
@@ -83,7 +90,7 @@ public abstract class Resource implements 
Comparable<Resource> {
   ret.setVirtualCores(vCores);
   return ret;
 }
-return new BaseResource(memory, vCores);
+return new LightWeightResource(memory, vCores);
   }
 
   @InterfaceAudience.Private
@@ -201,7 +208,9 @@ public abstract class Resource 

[35/44] hadoop git commit: YARN-7043. [Partial backport] Cleanup ResourceProfileManager. (wangda)

2017-10-31 Thread templedf
YARN-7043. [Partial backport] Cleanup ResourceProfileManager. (wangda)

Change-Id: I463356f37bf1f6a3f1fc3c594c79916e8c0ab913
(cherry picked from commit 39240b61a163b127eec385decd30ffd96e694c28)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/605fcde0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/605fcde0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/605fcde0

Branch: refs/heads/branch-3.0
Commit: 605fcde012022b0c18d441517dbabb2da29c9d98
Parents: f2b881a
Author: Wangda Tan 
Authored: Mon Aug 21 17:20:06 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../ams/ApplicationMasterServiceProcessor.java  |  8 ++--
 .../yarn/api/ApplicationClientProtocol.java |  4 +-
 .../YARNFeatureNotEnabledException.java | 45 
 .../yarn/util/resource/ResourceUtils.java   | 21 +
 .../hadoop/yarn/client/api/YarnClient.java  |  4 +-
 .../resource/DominantResourceCalculator.java|  6 ++-
 .../hadoop/yarn/api/TestPBImplRecords.java  |  1 +
 .../resourcemanager/AMSProcessingChain.java |  2 +-
 .../server/resourcemanager/ClientRMService.java |  4 +-
 .../resourcemanager/DefaultAMSProcessor.java|  3 +-
 ...pportunisticContainerAllocatorAMService.java |  3 +-
 .../scheduler/AbstractYarnScheduler.java|  1 +
 .../TestApplicationMasterService.java   | 11 +++--
 13 files changed, 95 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/605fcde0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
index b7d925a..8e76a11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -52,11 +52,13 @@ public interface ApplicationMasterServiceProcessor {
* @param request Register Request.
* @param response Register Response.
* @throws IOException IOException.
+   * @throws YarnException in critical situation where invalid
+   * profiles/resources are added.
*/
-  void registerApplicationMaster(
-  ApplicationAttemptId applicationAttemptId,
+  void registerApplicationMaster(ApplicationAttemptId applicationAttemptId,
   RegisterApplicationMasterRequest request,
-  RegisterApplicationMasterResponse response) throws IOException;
+  RegisterApplicationMasterResponse response)
+  throws IOException, YarnException;
 
   /**
* Allocate call.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/605fcde0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
index c18ec44..1f0a360 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 
 /**
  * The protocol between clients and the ResourceManager
@@ -598,8 +599,7 @@ public interface ApplicationClientProtocol extends 
ApplicationBaseProtocol {
* 
* @param request request to get the details of a resource profile
* @return Response containing the details for a particular resource profile
-   * @throws YarnException if resource profiles are not enabled on the RM or
-   * the profile cannot be found
+   * @throws YarnException if any error happens inside YARN
* @throws IOException in case of other errors
*/
   @Public


[43/44] hadoop git commit: YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource parameter. (Sen Zhao via wangda)

2017-10-31 Thread templedf
YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource 
parameter. (Sen Zhao via wangda)

Change-Id: Icc3670c9381ce7591ca69ec12da5aa52d3612d34
(cherry picked from commit e81596d06d226f1cfa44b2390ce3095ed4dee621)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f418439
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f418439
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f418439

Branch: refs/heads/branch-3.0
Commit: 6f41843906cdfc38d1df27ef9eaca35524792a75
Parents: f30f78a
Author: Wangda Tan 
Authored: Sun Sep 17 21:20:43 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:18 2017 -0700

--
 .../resource/DefaultResourceCalculator.java |  3 +-
 .../resource/DominantResourceCalculator.java|  2 +-
 .../yarn/util/resource/ResourceCalculator.java  |  3 +-
 .../hadoop/yarn/util/resource/Resources.java|  4 +--
 .../util/resource/TestResourceCalculator.java   | 24 +++---
 .../server/resourcemanager/RMServerUtils.java   |  3 +-
 .../CapacitySchedulerPreemptionUtils.java   |  4 +--
 ...QueuePriorityContainerCandidateSelector.java |  5 ++-
 .../ReservedContainerCandidatesSelector.java| 34 +---
 .../scheduler/capacity/AbstractCSQueue.java |  2 +-
 .../allocator/RegularContainerAllocator.java|  8 ++---
 .../scheduler/common/fica/FiCaSchedulerApp.java | 21 +---
 .../scheduler/capacity/TestReservations.java| 20 +---
 13 files changed, 55 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f418439/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd..7f155e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -123,8 +123,7 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster,
-  Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
 return smaller.getMemorySize() <= bigger.getMemorySize();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f418439/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index d64f03e..ca828a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -538,7 +538,7 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
 int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
 for (int i = 0; i < maxLength; i++) {
   ResourceInformation sResourceInformation = smaller

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f418439/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 398dac5..d59560f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ 

[08/44] hadoop git commit: YARN-6232. Update resource usage and preempted resource calculations to take into account all resource types. Contributed by Varun Vasudev.

2017-10-31 Thread templedf
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bda5486d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
index e5e31e0..8553d8c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
@@ -44,6 +44,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
@@ -106,15 +107,12 @@ public class RMAppBlock extends AppBlock{
   attemptResourcePreempted)
 .__("Number of Non-AM Containers Preempted from Current Attempt:",
   attemptNumNonAMContainerPreempted)
-.__("Aggregate Resource Allocation:",
-  String.format("%d MB-seconds, %d vcore-seconds",
-  appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(),
-  appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds()))
+.__("Aggregate Resource Allocation:", appMetrics == null ? "N/A" :
+StringHelper
+.getResourceSecondsString(appMetrics.getResourceSecondsMap()))
 .__("Aggregate Preempted Resource Allocation:",
-  String.format("%d MB-seconds, %d vcore-seconds",
-appMetrics == null ? "N/A" : 
appMetrics.getPreemptedMemorySeconds(),
-appMetrics == null ? "N/A" :
-appMetrics.getPreemptedVcoreSeconds()));
+appMetrics == null ? "N/A" : StringHelper.getResourceSecondsString(
+appMetrics.getPreemptedResourceSecondsMap()));
 
 pdiv.__();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bda5486d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 4365eee..40ef695 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -101,6 +101,7 @@ public class AppInfo {
   private long vcoreSeconds;
   protected float queueUsagePercentage;
   protected float clusterUsagePercentage;
+  protected Map<String, Long> resourceSecondsMap;
 
   // preemption info fields
   private long preemptedResourceMB;
@@ -109,6 +110,7 @@ public class AppInfo {
   private int numAMContainerPreempted;
   private long preemptedMemorySeconds;
   private long preemptedVcoreSeconds;
+  protected Map<String, Long> preemptedResourceSecondsMap;
 
   // list of resource requests
   @XmlElement(name = "resourceRequests")
@@ -236,8 +238,10 @@ public class AppInfo {
   appMetrics.getResourcePreempted().getVirtualCores();
   memorySeconds = appMetrics.getMemorySeconds();
   vcoreSeconds = appMetrics.getVcoreSeconds();
+  resourceSecondsMap = appMetrics.getResourceSecondsMap();
   preemptedMemorySeconds = appMetrics.getPreemptedMemorySeconds();
   preemptedVcoreSeconds = appMetrics.getPreemptedVcoreSeconds();
+  preemptedResourceSecondsMap = 
appMetrics.getPreemptedResourceSecondsMap();
   ApplicationSubmissionContext appSubmissionContext =
   app.getApplicationSubmissionContext();
   unmanagedApplication = 

[19/44] hadoop git commit: YARN-5242. Update DominantResourceCalculator to consider all resource types in calculations. Contributed by Varun Vasudev.

2017-10-31 Thread templedf
YARN-5242. Update DominantResourceCalculator to consider all resource types in 
calculations. Contributed by Varun Vasudev.

(cherry picked from commit 9e4ba6aff595b7a5b53d016c97c8334e10f1f2cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feb6d00e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feb6d00e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feb6d00e

Branch: refs/heads/branch-3.0
Commit: feb6d00e2a0185dbb0710d568a586763150e7688
Parents: 16beac1
Author: Rohith Sharma K S 
Authored: Tue Jul 26 14:13:03 2016 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |  7 ++
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../resource/DominantResourceCalculator.java| 23 
 .../yarn/util/resource/ResourceUtils.java   |  5 +++--
 .../hadoop/yarn/util/resource/Resources.java|  6 +
 5 files changed, 31 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb6d00e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 1de7b2f..a25c8ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -327,6 +327,8 @@ public abstract class Resource implements 
Comparable<Resource> {
 otherResources = other.getResources();
 long diff = thisResources.size() - otherResources.size();
 if (diff == 0) {
+  // compare memory and vcores first(in that order) to preserve
+  // existing behaviour
   if (thisResources.keySet().equals(otherResources.keySet())) {
 diff = this.getMemorySize() - other.getMemorySize();
 if (diff == 0) {
@@ -335,6 +337,11 @@ public abstract class Resource implements 
Comparable<Resource> {
 if (diff == 0) {
   for (Map.Entry<String, ResourceInformation> entry : thisResources
   .entrySet()) {
+if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+|| entry.getKey()
+.equals(ResourceInformation.VCORES.getName())) {
+  continue;
+}
 diff =
 entry.getValue().compareTo(otherResources.get(entry.getKey()));
 if (diff != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb6d00e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index ab18f2d..5f32648 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -242,7 +242,7 @@ public class ResourcePBImpl extends Resource {
 builder.addResourceValueMap(e);
   }
 }
-builder.setMemory(this.getMemory());
+builder.setMemory(this.getMemorySize());
 builder.setVirtualCores(this.getVirtualCores());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb6d00e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 0412c0f..3c4413c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ 

[34/44] hadoop git commit: YARN-6789. Add Client API to get all supported resource types from RM. (Sunil G via wangda)

2017-10-31 Thread templedf
YARN-6789. Add Client API to get all supported resource types from RM. (Sunil G 
via wangda)

Change-Id: I366d8db6f6700acd087db5acb7a1be7e41b2b68d
(cherry picked from commit df3855541af98a3805958f2b5b9db6be705d52ab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4af3deae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4af3deae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4af3deae

Branch: refs/heads/branch-3.0
Commit: 4af3deaeb09430e7d4847a8bc7b6940bab205a1a
Parents: 255668a
Author: Wangda Tan 
Authored: Thu Aug 17 11:30:41 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 .../yarn/api/ApplicationClientProtocol.java |  17 ++
 .../GetAllResourceTypeInfoRequest.java  |  35 
 .../GetAllResourceTypeInfoResponse.java |  60 ++
 .../yarn/api/records/ResourceTypeInfo.java  | 196 +++
 .../yarn/util/resource/ResourceUtils.java   |  13 ++
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_protos.proto|   6 +
 .../src/main/proto/yarn_service_protos.proto|   7 +
 .../hadoop/yarn/client/api/YarnClient.java  |  16 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 ++
 .../ApplicationClientProtocolPBClientImpl.java  |  18 ++
 .../ApplicationClientProtocolPBServiceImpl.java |  21 ++
 .../pb/GetAllResourceTypeInfoRequestPBImpl.java |  70 +++
 .../GetAllResourceTypeInfoResponsePBImpl.java   | 184 +
 .../api/records/impl/pb/ResourcePBImpl.java |  12 +-
 .../records/impl/pb/ResourceTypeInfoPBImpl.java | 154 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  25 +++
 .../hadoop/yarn/api/TestResourcePBImpl.java |  61 ++
 .../yarn/server/MockResourceManagerFacade.java  |   8 +
 .../server/resourcemanager/ClientRMService.java |  11 ++
 .../resourcemanager/TestClientRMService.java|  45 +
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../DefaultClientRequestInterceptor.java|   8 +
 .../clientrm/FederationClientInterceptor.java   |   7 +
 .../router/clientrm/RouterClientRMService.java  |   9 +
 .../PassThroughClientRequestInterceptor.java|   8 +
 28 files changed, 1014 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4af3deae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 62aa497..ae05165 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -517,4 +518,10 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 client.killApplication(appId, diagnostics);
   }
+
+  @Override
+  public List getResourceTypeInfo()
+  throws YarnException, IOException {
+return client.getResourceTypeInfo();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4af3deae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 65eac65..8b6ea64 100644
--- 

[18/44] hadoop git commit: YARN-6994. [YARN-3926] Remove last uses of Long from resource types code. (Daniel Templeton via Yufei Gu)

2017-10-31 Thread templedf
YARN-6994. [YARN-3926] Remove last uses of Long from resource types code. 
(Daniel Templeton via Yufei Gu)

(cherry picked from commit d5e9939ebb59e4f6f8e5e2fe7f619fb032f2911c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c4505f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c4505f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c4505f3

Branch: refs/heads/branch-3.0
Commit: 9c4505f3fa39412084cabf76e61880a64ba1c7a2
Parents: 99c2924
Author: Yufei Gu 
Authored: Mon Aug 14 11:18:08 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../main/java/org/apache/hadoop/yarn/api/records/Resource.java   | 4 ++--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java   | 4 ++--
 .../java/org/apache/hadoop/yarn/util/resource/Resources.java | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4505f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index bbd4c87..54f0b18 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -273,14 +273,14 @@ public abstract class Resource implements 
Comparable {
*/
   @Public
   @Evolving
-  public void setResourceValue(String resource, Long value)
+  public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 if (resource.equals(MEMORY)) {
   this.setMemorySize(value);
   return;
 }
 if (resource.equals(VCORES)) {
-  this.setVirtualCores(value.intValue());
+  this.setVirtualCores((int)value);
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4505f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 00be77a..2de338a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 import java.util.Arrays;
 import java.util.Map;
@@ -174,7 +174,7 @@ public class ResourcePBImpl extends BaseResource {
   }
 
   @Override
-  public void setResourceValue(String resource, Long value)
+  public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 maybeInitBuilder();
 if (resource == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4505f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index f62114d..3cf78ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -104,7 +104,7 @@ public class Resources {
 }
 
 @Override
-public void setResourceValue(String resource, Long value)
+public void setResourceValue(String 

[29/44] hadoop git commit: YARN-7137. [YARN-3926] Move newly added APIs to unstable in YARN-3926 branch. Contributed by Wangda Tan.

2017-10-31 Thread templedf
YARN-7137. [YARN-3926] Move newly added APIs to unstable in YARN-3926 branch. 
Contributed by Wangda Tan.

(cherry picked from commit da0b6a354bf6f6bf37ca5a05a4a8eece09aa4893)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74030d80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74030d80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74030d80

Branch: refs/heads/branch-3.0
Commit: 74030d808cd95e26a0c48500c08d269fcb4150ee
Parents: 0db9ddb
Author: Sunil G 
Authored: Tue Sep 12 20:31:47 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   | 24 ++--
 .../yarn/api/records/ResourceRequest.java   |  1 +
 .../yarn/util/resource/ResourceUtils.java   | 19 
 .../hadoop/yarn/util/resource/package-info.java |  6 +
 .../resourcemanager/webapp/dao/AppInfo.java |  2 +-
 5 files changed, 15 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74030d80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 37b50f2..9a5bc79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -206,8 +206,8 @@ public abstract class Resource implements 
Comparable {
*
* @return Map of resource name to ResourceInformation
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public ResourceInformation[] getResources() {
 return resources;
   }
@@ -220,7 +220,7 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException if the resource can't be found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public ResourceInformation getResourceInformation(String resource)
   throws ResourceNotFoundException {
 Integer index = ResourceUtils.getResourceTypeIndex().get(resource);
@@ -240,8 +240,8 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException
*   if the resource can't be found
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public ResourceInformation getResourceInformation(int index)
   throws ResourceNotFoundException {
 ResourceInformation ri = null;
@@ -262,7 +262,7 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException if the resource can't be found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public long getResourceValue(String resource)
   throws ResourceNotFoundException {
 return getResourceInformation(resource).getValue();
@@ -276,7 +276,7 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException if the resource is not found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public void setResourceInformation(String resource,
   ResourceInformation resourceInformation)
   throws ResourceNotFoundException {
@@ -302,8 +302,8 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException
*   if the resource is not found
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public void setResourceInformation(int index,
   ResourceInformation resourceInformation)
   throws ResourceNotFoundException {
@@ -323,7 +323,7 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException if the resource is not found
*/
   @Public
-  @Evolving
+  @InterfaceStability.Unstable
   public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 if (resource.equals(ResourceInformation.MEMORY_URI)) {
@@ -350,8 +350,8 @@ public abstract class Resource implements 
Comparable {
* @throws ResourceNotFoundException
*   if the resource is not found
*/
-  @Public
-  @Evolving
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   public void setResourceValue(int index, long value)
   throws ResourceNotFoundException {
 try {


[14/44] hadoop git commit: YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang Cang.

2017-10-31 Thread templedf
YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang 
Cang.

(cherry picked from commit 4e5632d28eb8f6b1b8830c2aa89ec6321d477977)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58f2c070
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58f2c070
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58f2c070

Branch: refs/heads/branch-3.0
Commit: 58f2c0702884b712e9bafd806c8ef13bdac5
Parents: 5fa0dc3
Author: Sunil G 
Authored: Thu Jul 13 16:30:59 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58f2c070/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index bba5f7e..6e51efa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
-import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -34,7 +33,10 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 
-import java.util.*;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Collections;
+
 
 @Private
 @Unstable


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/44] hadoop git commit: YARN-6232. Update resource usage and preempted resource calculations to take into account all resource types. Contributed by Varun Vasudev.

2017-10-31 Thread templedf
YARN-6232. Update resource usage and preempted resource calculations to take 
into account all resource types. Contributed by Varun Vasudev.

(cherry picked from commit dae65f3bef8ffa34d02a37041f1dfdf91845)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bda5486d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bda5486d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bda5486d

Branch: refs/heads/branch-3.0
Commit: bda5486dade3871989ce04d42ee17906b1774b9b
Parents: 4045ef0
Author: Sunil G 
Authored: Mon Mar 6 11:34:20 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../records/ApplicationResourceUsageReport.java |  58 ++-
 .../src/main/proto/yarn_protos.proto|   7 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  35 +++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  16 +-
 .../ApplicationResourceUsageReportPBImpl.java   | 151 ---
 .../yarn/api/records/impl/pb/ProtoUtils.java|  34 +
 .../apache/hadoop/yarn/util/StringHelper.java   |  36 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  12 ++
 .../hadoop/yarn/api/TestPBImplRecords.java  |   4 +
 ...pplicationHistoryManagerOnTimelineStore.java |  18 ++-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  16 +-
 .../server/resourcemanager/RMAppManager.java|   8 +-
 .../server/resourcemanager/RMServerUtils.java   |  10 +-
 .../resourcemanager/recovery/RMStateStore.java  |   7 +-
 .../records/ApplicationAttemptStateData.java|  89 +--
 .../pb/ApplicationAttemptStateDataPBImpl.java   |  50 ++
 .../server/resourcemanager/rmapp/RMAppImpl.java |  43 +++---
 .../resourcemanager/rmapp/RMAppMetrics.java |  41 +++--
 .../attempt/AggregateAppResourceUsage.java  |  34 ++---
 .../rmapp/attempt/RMAppAttemptImpl.java |  32 ++--
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 106 ++---
 .../rmcontainer/RMContainerImpl.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  47 +++---
 .../resourcemanager/webapp/RMAppBlock.java  |  14 +-
 .../resourcemanager/webapp/dao/AppInfo.java |  28 
 .../webapp/dao/ResourceInfo.java|  36 -
 .../webapp/dao/SchedulerInfo.java   |   2 +-
 .../yarn_server_resourcemanager_recovery.proto  |   2 +
 .../server/resourcemanager/TestAppManager.java  |   7 +-
 .../TestContainerResourceUsage.java |   7 +-
 .../applicationsmanager/MockAsm.java|   9 +-
 .../metrics/TestSystemMetricsPublisher.java |  15 +-
 .../TestSystemMetricsPublisherForV2.java|  22 ++-
 .../recovery/RMStateStoreTestBase.java  |   8 +-
 .../recovery/TestZKRMStateStore.java|  23 +--
 .../resourcemanager/webapp/TestAppPage.java |   8 +-
 .../webapp/TestRMWebAppFairScheduler.java   |   5 +-
 37 files changed, 803 insertions(+), 253 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bda5486d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 3cf8f3d..f9c8975 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -24,6 +24,9 @@ import 
org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * Contains various scheduling metrics to be reported by UI and CLI.
  */
@@ -35,9 +38,9 @@ public abstract class ApplicationResourceUsageReport {
   @Unstable
   public static ApplicationResourceUsageReport newInstance(
   int numUsedContainers, int numReservedContainers, Resource usedResources,
-  Resource reservedResources, Resource neededResources, long memorySeconds,
-  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
-  long preemptedMemorySeconds, long preemptedVcoresSeconds) {
+  Resource reservedResources, Resource neededResources,
+  Map resourceSecondsMap, float queueUsagePerc,
+  float clusterUsagePerc, Map 

[21/44] hadoop git commit: YARN-7067. [YARN-3926] Optimize ResourceType information display in UI. Contributed by Wangda Tan.

2017-10-31 Thread templedf
YARN-7067. [YARN-3926] Optimize ResourceType information display in UI. 
Contributed by Wangda Tan.

(cherry picked from commit a333ba54e3f5a1ca57b0fc94bfd008f3cbd5b14b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5dd008ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5dd008ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5dd008ca

Branch: refs/heads/branch-3.0
Commit: 5dd008ca2e9b03c606bf24cfb960f372efbbe752
Parents: 605fcde
Author: Sunil G 
Authored: Tue Aug 22 16:59:29 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../apache/hadoop/yarn/api/records/ResourceTypeInfo.java |  7 +--
 .../resourcemanager/webapp/MetricsOverviewTable.java | 11 ---
 2 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dd008ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
index 6cb470c..d85cf0c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
@@ -153,8 +153,11 @@ public abstract class ResourceTypeInfo implements 
Comparable {
 
   @Override
   public String toString() {
-return "name: " + this.getName() + ", units: " + this.getDefaultUnit()
-+ ", type: " + getResourceType();
+StringBuilder sb = new StringBuilder();
+

[10/44] hadoop git commit: YARN-5587. [Partial backport] Add support for resource profiles. (vvasudev via asuresh)

2017-10-31 Thread templedf
YARN-5587. [Partial backport] Add support for resource profiles. (vvasudev via 
asuresh)

(cherry picked from commit 6708ac330147b2d3816a31f2ee83e09c41fe0dd9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9fa0349
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9fa0349
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9fa0349

Branch: refs/heads/branch-3.0
Commit: a9fa0349a08528abd96ce569695966fb64312275
Parents: 3d1b59e
Author: Arun Suresh 
Authored: Tue Nov 15 01:01:07 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:16 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|   4 +
 .../hadoop/yarn/api/records/Resource.java   |  14 ++
 .../yarn/api/records/ResourceInformation.java   |  57 ++-
 .../yarn/client/api/impl/AMRMClientImpl.java|   7 +-
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../yarn/util/resource/ResourceUtils.java   | 161 ++-
 .../hadoop/yarn/util/resource/Resources.java|  10 +-
 .../scheduler/AbstractYarnScheduler.java|  48 +-
 .../scheduler/ClusterNodeTracker.java   |   3 +-
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../scheduler/fair/FairScheduler.java   |   4 +-
 .../scheduler/fifo/FifoScheduler.java   |  13 +-
 .../yarn/server/resourcemanager/MockRM.java |   2 +
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../scheduler/fair/TestFairScheduler.java   |   4 +
 .../hadoop/yarn/server/MiniYARNCluster.java |   2 +
 16 files changed, 297 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9fa0349/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..ce7a9c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -154,6 +154,10 @@
 
   
   
+
+
+  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9fa0349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index a25c8ac..10755b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.api.records;
 
 import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
@@ -101,6 +103,18 @@ public abstract class Resource implements 
Comparable {
 return new SimpleResource(memory, vCores);
   }
 
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public static Resource newInstance(Resource resource) {
+Resource ret = Resource.newInstance(0, 0);
+for (Map.Entry entry : resource.getResources()
+.entrySet()) {
+  ret.setResourceInformation(entry.getKey(),
+  ResourceInformation.newInstance(entry.getValue()));
+}
+return ret;
+  }
+
   /**
* This method is DEPRECATED:
* Use {@link Resource#getMemorySize()} instead

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9fa0349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index a17e81b..7d74efc 100644
--- 

[36/44] hadoop git commit: YARN-6612. Update fair scheduler policies to be aware of resource types. (Contributed by Daniel Templeton via Yufei Gu)

2017-10-31 Thread templedf
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30f78a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
index 3719e2a..097558f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
@@ -23,15 +23,22 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.util.Comparator;
+import java.util.Map;
+import org.apache.curator.shaded.com.google.common.base.Joiner;
+import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FakeSchedulable;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy.DominantResourceFairnessComparator;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
@@ -39,10 +46,15 @@ import org.junit.Test;
  * container before sched2
  */
 public class TestDominantResourceFairnessPolicy {
+  @BeforeClass
+  public static void setup() {
+addResources("test");
+  }
 
   private Comparator createComparator(int clusterMem,
   int clusterCpu) {
-DominantResourceFairnessPolicy policy = new 
DominantResourceFairnessPolicy();
+DominantResourceFairnessPolicy policy =
+new DominantResourceFairnessPolicy();
 FSContext fsContext = mock(FSContext.class);
 when(fsContext.getClusterResource()).
 thenReturn(Resources.createResource(clusterMem, clusterCpu));
@@ -51,23 +63,23 @@ public class TestDominantResourceFairnessPolicy {
   }
   
   private Schedulable createSchedulable(int memUsage, int cpuUsage) {
-return createSchedulable(memUsage, cpuUsage, ResourceWeights.NEUTRAL, 0, 
0);
+return createSchedulable(memUsage, cpuUsage, 1.0f, 0, 0);
   }
   
   private Schedulable createSchedulable(int memUsage, int cpuUsage,
   int minMemShare, int minCpuShare) {
-return createSchedulable(memUsage, cpuUsage, ResourceWeights.NEUTRAL,
+return createSchedulable(memUsage, cpuUsage, 1.0f,
 minMemShare, minCpuShare);
   }
   
   private Schedulable createSchedulable(int memUsage, int cpuUsage,
-  ResourceWeights weights) {
+  float weights) {
 return createSchedulable(memUsage, cpuUsage, weights, 0, 0);
   }
 
   
   private Schedulable createSchedulable(int memUsage, int cpuUsage,
-  ResourceWeights weights, int minMemShare, int minCpuShare) {
+  float weights, int minMemShare, int minCpuShare) {
 Resource usage = BuilderUtils.newResource(memUsage, cpuUsage);
 Resource minShare = BuilderUtils.newResource(minMemShare, minCpuShare);
 return new FakeSchedulable(minShare,
@@ -77,94 +89,260 @@ public class TestDominantResourceFairnessPolicy {
   
   @Test
   public void testSameDominantResource() {
-assertTrue(createComparator(8000, 4).compare(
-createSchedulable(1000, 1),
-createSchedulable(2000, 1)) < 0);
+Comparator c = createComparator(8000, 4);
+Schedulable s1 = createSchedulable(1000, 1);
+Schedulable s2 = createSchedulable(2000, 1);
+
+assertTrue("Comparison didn't return a value less than 0",
+c.compare(s1, s2) < 0);
   }
   
   @Test
   public void testDifferentDominantResource() {
-assertTrue(createComparator(8000, 8).compare(
-createSchedulable(4000, 3),
-

[31/44] hadoop git commit: YARN-7056. [Partial backport] Document Resource Profiles feature. (Sunil G via wangda)

2017-10-31 Thread templedf
YARN-7056. [Partial backport] Document Resource Profiles feature. (Sunil G via 
wangda)

Change-Id: I5f5d52cdf7aba4ac09684aab573e6916dbad183e
(cherry picked from commit 6b88cd1901a65b977fed759e322dcd75a2cd33b6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/335e3a63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/335e3a63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/335e3a63

Branch: refs/heads/branch-3.0
Commit: 335e3a633424c743f9f4bd1658728f979dfe43e0
Parents: 4bf2991
Author: Wangda Tan 
Authored: Thu Sep 7 10:07:35 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 hadoop-project/src/site/site.xml|  1 +
 .../src/site/markdown/ResourceProfiles.md   | 79 
 2 files changed, 80 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/335e3a63/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 35e743a..2aa1da7 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -144,6 +144,7 @@
   
   
   
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/335e3a63/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceProfiles.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceProfiles.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceProfiles.md
new file mode 100644
index 000..e7b38e1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceProfiles.md
@@ -0,0 +1,79 @@
+
+
+Hadoop: YARN Resource Types
+===
+
+Overview
+
+Resource types support in YARN helps to extend the YARN resource model to a 
more flexible model which makes it easier to add new countable resource types. 
This solution also helps the users to submit jobs with ease to specify the 
resources they need.
+
+Resource model of YARN
+---
+Resource Manager will load a new configuration file named `resource-types.xml` 
to determine the set of resource types for which scheduling is enabled. 
Sample XML will look like below.
+
+```xml
+
+  
+yarn.resource-types
+resource1, resource2
+  
+
+  
+yarn.resource-types.resource1.units
+G
+  
+
+```
+
+Similarly, a new configuration file `node-resources.xml` will also be loaded 
by Node Manager where the resource capabilities of a node can be specified.
+
+```xml
+
+ 
+   yarn.nodemanager.resource-type.resource1
+   5G
+ 
+
+ 
+   yarn.nodemanager.resource-type.resource2
+   2m
+ 
+
+
+```
+
+Node Manager will use these custom resource types and will register its 
capability to Resource Manager.
+
+Configurations
+-
+
+Please note that the `resource-types.xml` and `node-resources.xml` files also 
need to be placed in the conf directory if new resources are to be added to YARN.
+
+*In `resource-types.xml`*
+
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.resource-types` | resource1 | Custom resource  |
+| `yarn.resource-types.resource1.units` | G | Default unit for resource1 type  
|
+
+*In `node­-resources.xml`*
+
+| Configuration Property | Value | Description |
+|: |: |: |
+| `yarn.nodemanager.resource-type.resource1` | 5G | Resource capability for 
resource named 'resource1'. |
+


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/44] hadoop git commit: YARN-6781. [YARN-3926] ResourceUtils#initializeResourcesMap takes an unnecessary Map parameter. Contributed by Yu-Tang Lin.

2017-10-31 Thread templedf
YARN-6781. [YARN-3926] ResourceUtils#initializeResourcesMap takes an 
unnecessary Map parameter. Contributed by Yu-Tang Lin.

(cherry picked from commit 758b7719430c981a6d4d1644d9d03cd16b981cf3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2b881a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2b881a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2b881a5

Branch: refs/heads/branch-3.0
Commit: f2b881a5052cc37546cf0ea497162415dbf6695f
Parents: 4af3dea
Author: Sunil G 
Authored: Fri Aug 18 19:00:49 2017 +0530
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../hadoop/yarn/util/resource/ResourceUtils.java  | 14 +++---
 .../hadoop/yarn/util/resource/TestResourceUtils.java  |  8 
 2 files changed, 11 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2b881a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 5ed5712..997c2c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -201,9 +201,9 @@ public class ResourceUtils {
   }
 
   @VisibleForTesting
-  static void initializeResourcesMap(Configuration conf,
-  Map<String, ResourceInformation> resourceInformationMap) {
+  static void initializeResourcesMap(Configuration conf) {
 
+Map<String, ResourceInformation> resourceInformationMap = new HashMap<>();
 String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES);
 
 if (resourceNames != null && resourceNames.length != 0) {
@@ -339,19 +339,18 @@ public class ResourceUtils {
 if (!initializedResources) {
   synchronized (ResourceUtils.class) {
 if (!initializedResources) {
-  Map<String, ResourceInformation> resources = new HashMap<>();
   if (conf == null) {
 conf = new YarnConfiguration();
   }
   try {
 addResourcesFileToConf(resourceFile, conf);
 LOG.debug("Found " + resourceFile + ", adding to configuration");
-initializeResourcesMap(conf, resources);
+initializeResourcesMap(conf);
 initializedResources = true;
   } catch (FileNotFoundException fe) {
 LOG.info("Unable to find '" + resourceFile
 + "'. Falling back to memory and vcores as resources", fe);
-initializeResourcesMap(conf, resources);
+initializeResourcesMap(conf);
 initializedResources = true;
   }
 }
@@ -414,11 +413,12 @@ public class ResourceUtils {
   }
 
   @VisibleForTesting
-  public static void resetResourceTypes(Configuration conf) {
+  public static Map<String, ResourceInformation>
+  resetResourceTypes(Configuration conf) {
 synchronized (ResourceUtils.class) {
   initializedResources = false;
 }
-getResourceTypes(conf);
+return getResourceTypes(conf);
   }
 
   public static String getUnits(String resourceValue) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2b881a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index b530150..4e4671a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -183,8 +183,9 @@ public class TestResourceUtils {
 YarnConfiguration.RESOURCE_TYPES + "." + resources[0] + ".units";
 conf.set(name, resources[1]);
   }
-  Map<String, ResourceInformation> ret = new HashMap<>();
-  ResourceUtils.initializeResourcesMap(conf, ret);
+  Map<String, ResourceInformation> ret =
+  ResourceUtils.resetResourceTypes(conf);
+
   // for test1, 

[02/44] hadoop git commit: YARN-4172. Extend DominantResourceCalculator to account for all resources. (Varun Vasudev via wangda)

2017-10-31 Thread templedf
YARN-4172. Extend DominantResourceCalculator to account for all resources. 
(Varun Vasudev via wangda)

(cherry picked from commit 32c91223f1bd06561ea4ce2d1944e8d9a847f18c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33fabe1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33fabe1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33fabe1e

Branch: refs/heads/branch-3.0
Commit: 33fabe1e10f8e7b58bcabbf6de8ca88d0ac3f891
Parents: 68da521
Author: Wangda Tan 
Authored: Fri Jan 29 10:53:31 2016 +0800
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:15 2017 -0700

--
 .../resource/DominantResourceCalculator.java| 380 +--
 1 file changed, 273 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33fabe1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 7697e1d..a94e7a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -22,25 +22,31 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+
+import java.util.HashSet;
+import java.util.Set;
 
 /**
- * A {@link ResourceCalculator} which uses the concept of  
+ * A {@link ResourceCalculator} which uses the concept of
  * dominant resource to compare multi-dimensional resources.
  *
- * Essentially the idea is that the in a multi-resource environment, 
- * the resource allocation should be determined by the dominant share 
- * of an entity (user or queue), which is the maximum share that the 
- * entity has been allocated of any resource. 
- * 
- * In a nutshell, it seeks to maximize the minimum dominant share across 
- * all entities. 
- * 
+ * Essentially the idea is that the in a multi-resource environment,
+ * the resource allocation should be determined by the dominant share
+ * of an entity (user or queue), which is the maximum share that the
+ * entity has been allocated of any resource.
+ *
+ * In a nutshell, it seeks to maximize the minimum dominant share across
+ * all entities.
+ *
  * For example, if user A runs CPU-heavy tasks and user B runs
- * memory-heavy tasks, it attempts to equalize CPU share of user A 
- * with Memory-share of user B. 
- * 
+ * memory-heavy tasks, it attempts to equalize CPU share of user A
+ * with Memory-share of user B.
+ *
  * In the single resource case, it reduces to max-min fairness for that 
resource.
- * 
+ *
  * See the Dominant Resource Fairness paper for more details:
  * www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf
  */
@@ -50,6 +56,56 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   private static final Log LOG =
   LogFactory.getLog(DominantResourceCalculator.class);
 
+
+  private Set<String> resourceNames;
+
+  public DominantResourceCalculator() {
+resourceNames = new HashSet<>();
+resourceNames.add(ResourceInformation.MEMORY.getName());
+resourceNames.add(ResourceInformation.VCORES.getName());
+  }
+
+  /**
+   * Compare two resources - if the value for every resource type for the lhs
+   * is greater than that of the rhs, return 1. If the value for every resource
+   * type in the lhs is less than the rhs, return -1. Otherwise, return 0
+   *
+   * @param lhs resource to be compared
+   * @param rhs resource to be compared
+   * @return 0, 1, or -1
+   */
+  private int compare(Resource lhs, Resource rhs) {
+boolean lhsGreater = false;
+boolean rhsGreater = false;
+int ret = 0;
+
+for (String rName : resourceNames) {
+  try {
+ResourceInformation lhsResourceInformation =
+lhs.getResourceInformation(rName);
+ResourceInformation rhsResourceInformation =
+rhs.getResourceInformation(rName);
+int diff = 

[33/44] hadoop git commit: YARN-7042. Clean up unit tests after YARN-6610. (Daniel Templeton via wangda)

2017-10-31 Thread templedf
YARN-7042. Clean up unit tests after YARN-6610. (Daniel Templeton via wangda)

Change-Id: I8e40f704b6fcdd5b14faa9548a27986501044fa1
(cherry picked from commit b1fe3a222e7673fd84a878622969f958022061e9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/255668a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/255668a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/255668a2

Branch: refs/heads/branch-3.0
Commit: 255668a2a6d5db2d3fbdbefa0df8b7c1a37a05b3
Parents: 8827d93
Author: Wangda Tan 
Authored: Thu Aug 17 11:18:08 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Oct 31 15:06:17 2017 -0700

--
 .../resource/DominantResourceCalculator.java|  2 +-
 .../util/resource/TestResourceCalculator.java   | 95 ++--
 2 files changed, 49 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/255668a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 40b38b9..1e99bc7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -126,7 +126,7 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 diff = max[0] - max[1];
   } else if (clusterRes.length == 2) {
 // Special case to handle the common scenario of only CPU and memory
-// so the we can optimize for performance
+// so that we can optimize for performance
 diff = calculateSharesForMandatoryResources(clusterRes, lhs, rhs,
 lhsShares, rhsShares);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/255668a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
index 19e7f8d..5b4155c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
@@ -24,7 +24,7 @@ import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -44,13 +44,18 @@ public class TestResourceCalculator {
 { new DominantResourceCalculator() } });
   }
 
-  @BeforeClass
-  public static void setup() {
+  @Before
+  public void setupNoExtraResource() {
+// This has to run before each test because we don't know when
+// setupExtraResource() might be called
+ResourceUtils.resetResourceTypes(new Configuration());
+  }
+
+  private static void setupExtraResource() {
 Configuration conf = new Configuration();
 
 conf.set(YarnConfiguration.RESOURCE_TYPES, "test");
 ResourceUtils.resetResourceTypes(conf);
-ResourceUtils.getResourceTypes();
   }
 
   public TestResourceCalculator(ResourceCalculator rs) {
@@ -86,9 +91,15 @@ public class TestResourceCalculator {
 }
   }
 
-  private Resource newResource(long memory, int cpu, int test) {
+  private Resource newResource(long memory, int cpu) {
 Resource res = Resource.newInstance(memory, cpu);
 
+return res;
+  }
+
+  private Resource newResource(long memory, int cpu, int test) {
+Resource res = newResource(memory, cpu);
+
 res.setResourceValue("test", test);
 
 return res;
@@ -123,28 +134,48 @@ public class TestResourceCalculator {
   }
 
   @Test
-  public void testCompare2() {
+  public void testCompareWithOnlyMandatory() {
+// This test is necessary because there are optimizations that are only
+// triggered 

  1   2   3   4   5   6   >