hadoop git commit: HDFS-11303. Hedged read might hang infinitely if read data from all DN failed. Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.

2017-08-11 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7d3cea232 -> ed4d6aa2c


HDFS-11303. Hedged read might hang infinitely if read data from all DN failed.
Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.

(cherry picked from commit 8b242f09a61a7536d2422546bfa6c2aaf1d57ed6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed4d6aa2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed4d6aa2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed4d6aa2

Branch: refs/heads/branch-2
Commit: ed4d6aa2c12959755ee5c8c5785e4f539e48ed32
Parents: 7d3cea2
Author: John Zhuge 
Authored: Fri Aug 11 20:06:22 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 21:55:23 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++--
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 63 
 2 files changed, 70 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed4d6aa2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 1cd420e..8c8383ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1357,8 +1357,9 @@ public class DFSInputStream extends FSInputStream
 Future<ByteBuffer> firstRequest = hedgedService
 .submit(getFromDataNodeCallable);
 futures.add(firstRequest);
+Future<ByteBuffer> future = null;
 try {
-  Future<ByteBuffer> future = hedgedService.poll(
+  future = hedgedService.poll(
   conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
   if (future != null) {
 ByteBuffer result = future.get();
@@ -1368,16 +1369,18 @@ public class DFSInputStream extends FSInputStream
   }
   DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
   + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
-  // Ignore this node on next go around.
-  ignored.add(chosenNode.info);
   dfsClient.getHedgedReadMetrics().incHedgedReadOps();
   // continue; no need to refresh block locations
 } catch (ExecutionException e) {
-  // Ignore
+  futures.remove(future);
 } catch (InterruptedException e) {
   throw new InterruptedIOException(
   "Interrupted while waiting for reading task");
 }
+// Ignore this node on next go around.
+// If poll timeout and the request still ongoing, don't consider it
+// again. If read data failed, don't consider it either.
+ignored.add(chosenNode.info);
   } else {
 // We are starting up a 'hedged' read. We have a read already
 // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.

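For context: the crux of the fix is that a hedged attempt which fails with ExecutionException is now removed from `futures`, and the node is skipped whether the attempt timed out or failed outright. The hedged-read loop later waits on the remaining entries in `futures` (getFirstToComplete), so a failure that was already consumed here but left in the list made that wait block forever. A minimal sketch of the mechanism using plain java.util.concurrent rather than the HDFS client classes (names below are illustrative, not DFSInputStream's):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    public class HedgedPollSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<String> hedgedService =
            new ExecutorCompletionService<>(pool);
        List<Future<String>> futures = new ArrayList<>();

        // First attempt fails immediately, standing in for a bad DataNode.
        Future<String> first = hedgedService.submit(() -> {
          throw new IOException("simulated DN read failure");
        });
        futures.add(first);

        Future<String> done = hedgedService.poll(50, TimeUnit.MILLISECONDS);
        try {
          if (done != null) {
            System.out.println("read: " + done.get());
          }
        } catch (ExecutionException e) {
          // The fix: drop the consumed, failed attempt from `futures` so a
          // later drain loop is not left waiting for a completion that was
          // already taken off the completion queue here.
          futures.remove(done);
        }
        System.out.println("attempts still outstanding: " + futures.size());
        pool.shutdown();
      }
    }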
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed4d6aa2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 865724a..903ee6c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -60,6 +60,8 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Supplier;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
 
 /**
  * This class tests the DFS positional read functionality in a single node
@@ -73,6 +75,9 @@ public class TestPread {
   boolean simulatedStorage;
   boolean isHedgedRead;
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestPread.class.getName());
+
   @Before
   public void setup() {
 simulatedStorage = false;
@@ -554,6 +559,64 @@ public class TestPread {
 }
   }
 
+  @Test(timeout=3)
+  public void testHedgedReadFromAllDNFailed() throws IOException {
+Configuration conf = new Configuration();
+int numHedgedReadPoolThreads = 5;
+final int hedgedReadTimeoutMillis = 50;
+
+conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
+numHedgedReadPoolThreads);
+

hadoop git commit: Revert "YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the diamond operator"

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ce0cdc50d -> 7d3cea232


Revert "YARN-6882. AllocationFileLoaderService.reloadAllocations() should use 
the diamond operator"

This reverts commit ce0cdc50d9700118139e7907dede274d2db93fda.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d3cea23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d3cea23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d3cea23

Branch: refs/heads/branch-2
Commit: 7d3cea232db4ce8249e9b078f1bfea00e0e4cadc
Parents: ce0cdc5
Author: Daniel Templeton 
Authored: Fri Aug 11 20:52:07 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 20:52:07 2017 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d3cea23/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 5696a97..a142d37 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -267,7 +267,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 Map<FSQueueType, Set<String>> configuredQueues = new HashMap<>();
 
 for (FSQueueType queueType : FSQueueType.values()) {
-  configuredQueues.put(queueType, new HashSet<>());
+  configuredQueues.put(queueType, new HashSet<String>());
 }
 
 // Read and parse the allocations file.
@@ -281,7 +281,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   throw new AllocationConfigurationException("Bad fair scheduler config " +
   "file: top-level element not ");
 NodeList elements = root.getChildNodes();
-List<Element> queueElements = new ArrayList<>();
+List<Element> queueElements = new ArrayList<Element>();
 Element placementPolicyElement = null;
 for (int i = 0; i < elements.getLength(); i++) {
   Node node = elements.item(i);





hadoop git commit: HDFS-11303. Hedged read might hang infinitely if read data from all DN failed. Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.

2017-08-11 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 28d97b79b -> 8b242f09a


HDFS-11303. Hedged read might hang infinitely if read data from all DN failed.
Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b242f09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b242f09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b242f09

Branch: refs/heads/trunk
Commit: 8b242f09a61a7536d2422546bfa6c2aaf1d57ed6
Parents: 28d97b7
Author: John Zhuge 
Authored: Thu Aug 10 14:04:36 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 19:42:07 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++--
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 63 
 2 files changed, 70 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dcc997c..6bff172 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1131,8 +1131,9 @@ public class DFSInputStream extends FSInputStream
 Future<ByteBuffer> firstRequest = hedgedService
 .submit(getFromDataNodeCallable);
 futures.add(firstRequest);
+Future<ByteBuffer> future = null;
 try {
-  Future<ByteBuffer> future = hedgedService.poll(
+  future = hedgedService.poll(
   conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
   if (future != null) {
 ByteBuffer result = future.get();
@@ -1142,16 +1143,18 @@ public class DFSInputStream extends FSInputStream
   }
   DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
   + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
-  // Ignore this node on next go around.
-  ignored.add(chosenNode.info);
   dfsClient.getHedgedReadMetrics().incHedgedReadOps();
   // continue; no need to refresh block locations
 } catch (ExecutionException e) {
-  // Ignore
+  futures.remove(future);
 } catch (InterruptedException e) {
   throw new InterruptedIOException(
   "Interrupted while waiting for reading task");
 }
+// Ignore this node on next go around.
+// If poll timeout and the request still ongoing, don't consider it
+// again. If read data failed, don't consider it either.
+ignored.add(chosenNode.info);
   } else {
 // We are starting up a 'hedged' read. We have a read already
 // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.

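The shape of the regression the new test guards against, reduced to a plain-executor sketch (hedgedPread below is a hypothetical stand-in; the real test drives a pread against DataNodes that all fail): pre-fix the read hangs until the test timeout trips, post-fix it surfaces the IOException promptly.

    import java.io.IOException;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class AllReplicasFailSketch {
      // Hypothetical stand-in for a hedged pread whose every replica fails.
      static byte[] hedgedPread() throws IOException {
        throw new IOException("all datanodes failed");
      }

      public static void main(String[] args) throws Exception {
        ExecutorService runner = Executors.newSingleThreadExecutor();
        Future<byte[]> read = runner.submit(AllReplicasFailSketch::hedgedPread);
        try {
          read.get(30, TimeUnit.SECONDS);   // test-style upper bound
          System.out.println("unexpected success");
        } catch (ExecutionException e) {
          // Post-fix behavior: fail promptly with the underlying IOException.
          System.out.println("failed cleanly: " + e.getCause());
        } catch (TimeoutException e) {
          // Pre-fix behavior: the read hangs and the bound trips instead.
          System.out.println("read hung (the HDFS-11303 symptom)");
        } finally {
          runner.shutdownNow();
        }
      }
    }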
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 85fc97b..bcb02b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -59,6 +59,8 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Supplier;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
 
 /**
  * This class tests the DFS positional read functionality in a single node
@@ -72,6 +74,9 @@ public class TestPread {
   boolean simulatedStorage;
   boolean isHedgedRead;
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestPread.class.getName());
+
   @Before
   public void setup() {
 simulatedStorage = false;
@@ -551,6 +556,64 @@ public class TestPread {
 }
   }
 
+  @Test(timeout=3)
+  public void testHedgedReadFromAllDNFailed() throws IOException {
+Configuration conf = new Configuration();
+int numHedgedReadPoolThreads = 5;
+final int hedgedReadTimeoutMillis = 50;
+
+conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
+numHedgedReadPoolThreads);
+conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
+

hadoop git commit: YARN-6687. Validate that the duration of the periodic reservation is less than the periodicity. (subru via curino)

2017-08-11 Thread curino
Repository: hadoop
Updated Branches:
  refs/heads/trunk cc59b5fb2 -> 28d97b79b


YARN-6687. Validate that the duration of the periodic reservation is less than 
the periodicity. (subru via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d97b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d97b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d97b79

Branch: refs/heads/trunk
Commit: 28d97b79b69bb2be02d9320105e155eeed6f9e78
Parents: cc59b5f
Author: Carlo Curino 
Authored: Fri Aug 11 16:58:04 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 16:58:04 2017 -0700

--
 .../reservation/ReservationInputValidator.java  | 18 ++--
 .../TestReservationInputValidator.java  | 93 
 2 files changed, 106 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
index 0e9a825..027d066 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
@@ -129,11 +129,12 @@ public class ReservationInputValidator {
   Resources.multiply(rr.getCapability(), rr.getConcurrency()));
 }
 // verify the allocation is possible (skip for ANY)
-if (contract.getDeadline() - contract.getArrival() < minDuration
+long duration = contract.getDeadline() - contract.getArrival();
+if (duration < minDuration
 && type != ReservationRequestInterpreter.R_ANY) {
   message =
   "The time difference ("
-  + (contract.getDeadline() - contract.getArrival())
+  + (duration)
   + ") between arrival (" + contract.getArrival() + ") "
   + "and deadline (" + contract.getDeadline() + ") must "
   + " be greater or equal to the minimum resource duration ("
@@ -158,15 +159,22 @@ public class ReservationInputValidator {
 // check that the recurrence is a positive long value.
 String recurrenceExpression = contract.getRecurrenceExpression();
 try {
-  Long recurrence = Long.parseLong(recurrenceExpression);
+  long recurrence = Long.parseLong(recurrenceExpression);
   if (recurrence < 0) {
 message = "Negative Period : " + recurrenceExpression + ". Please try"
-+ " again with a non-negative long value as period";
++ " again with a non-negative long value as period.";
+throw RPCUtil.getRemoteException(message);
+  }
+  // verify duration is less than recurrence for periodic reservations
+  if (recurrence > 0 && duration > recurrence) {
+message = "Duration of the requested reservation: " + duration
++ " is greater than the recurrence: " + recurrence
++ ". Please try again with a smaller duration.";
 throw RPCUtil.getRemoteException(message);
   }
 } catch (NumberFormatException e) {
   message = "Invalid period " + recurrenceExpression + ". Please try"
-  + " again with a non-negative long value as period";
+  + " again with a non-negative long value as period.";
   throw RPCUtil.getRemoteException(message);
 }
   }

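The added rule in isolation: a periodic reservation (recurrence > 0) must fit within a single period. A standalone sketch, with the ReservationInputValidator plumbing omitted and YARN's remote-exception types replaced by IllegalArgumentException:

    final class ReservationPeriodCheck {
      static void validate(long arrival, long deadline,
          String recurrenceExpression) {
        long duration = deadline - arrival;
        final long recurrence;
        try {
          recurrence = Long.parseLong(recurrenceExpression);
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException(
              "Invalid period " + recurrenceExpression, e);
        }
        if (recurrence < 0) {
          throw new IllegalArgumentException(
              "Negative period: " + recurrenceExpression);
        }
        // New in YARN-6687: the reservation must fit in one period.
        if (recurrence > 0 && duration > recurrence) {
          throw new IllegalArgumentException("Duration " + duration
              + " is greater than the recurrence " + recurrence);
        }
      }
    }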
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
 

hadoop git commit: YARN-6935. [YARN-3926] ResourceProfilesManagerImpl.parseResource() has no need of the key parameter (Contributed by Manikandan R via Daniel Templeton)

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 1b586d700 -> 5452a0748


YARN-6935. [YARN-3926] ResourceProfilesManagerImpl.parseResource() has no need 
of the key parameter
(Contributed by Manikandan R via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5452a074
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5452a074
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5452a074

Branch: refs/heads/YARN-3926
Commit: 5452a07487388a9f9ad3759c61bdbc606d834eb9
Parents: 1b586d7
Author: Daniel Templeton 
Authored: Fri Aug 11 16:32:13 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 16:32:13 2017 -0700

--
 .../resource/ResourceProfilesManagerImpl.java   | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5452a074/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
index ab6..b5ab384 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
@@ -87,22 +87,22 @@ public class ResourceProfilesManagerImpl implements 
ResourceProfilesManager {
 Iterator iterator = data.entrySet().iterator();
 while (iterator.hasNext()) {
   Map.Entry entry = (Map.Entry) iterator.next();
-  String key = entry.getKey().toString();
-  if (key.isEmpty()) {
+  String profileName = entry.getKey().toString();
+  if (profileName.isEmpty()) {
 throw new IOException(
 "Name of resource profile cannot be an empty string");
   }
   if (entry.getValue() instanceof Map) {
-Map value = (Map) entry.getValue();
+Map profileInfo = (Map) entry.getValue();
 // ensure memory and vcores are specified
-if (!value.containsKey(MEMORY) || !value.containsKey(VCORES)) {
+if (!profileInfo.containsKey(MEMORY) || 
!profileInfo.containsKey(VCORES)) {
   throw new IOException(
-  "Illegal resource profile definition; profile '" + key
+  "Illegal resource profile definition; profile '" + profileName
   + "' must contain '" + MEMORY + "' and '" + VCORES + "'");
 }
-Resource resource = parseResource(key, value);
-profiles.put(key, resource);
-LOG.info("Added profile '" + key + "' with resources " + resource);
+Resource resource = parseResource(profileInfo);
+profiles.put(profileName, resource);
+LOG.info("Added profile '" + profileName + "' with resources " + 
resource);
   }
 }
 // check to make sure mandatory profiles are present
@@ -116,9 +116,9 @@ public class ResourceProfilesManagerImpl implements 
ResourceProfilesManager {
 LOG.info("Loaded profiles " + profiles.keySet());
   }
 
-  private Resource parseResource(String key, Map value) throws IOException {
+  private Resource parseResource(Map profileInfo) throws IOException {
 Resource resource = Resource.newInstance(0, 0);
-Iterator iterator = value.entrySet().iterator();
+Iterator iterator = profileInfo.entrySet().iterator();
 Map resourceTypes = ResourceUtils
 .getResourceTypes();
 while (iterator.hasNext()) {


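The refactor's point is that parseResource() never used the profile name, only the per-profile map of values. A self-contained sketch of the post-patch shape (ResourceSpec and the "memory-mb"/"vcores" key strings are stand-ins, not YARN's actual Resource API):

    import java.io.IOException;
    import java.util.Map;

    final class ProfileParser {
      static final class ResourceSpec {   // hypothetical stand-in for Resource
        final long memoryMb;
        final int vcores;
        ResourceSpec(long memoryMb, int vcores) {
          this.memoryMb = memoryMb;
          this.vcores = vcores;
        }
      }

      // Post-YARN-6935 shape: only the per-profile map is a parameter;
      // the profile name is not needed to build the resource.
      static ResourceSpec parseResource(Map<String, Object> profileInfo)
          throws IOException {
        Object mem = profileInfo.get("memory-mb");
        Object cpu = profileInfo.get("vcores");
        if (mem == null || cpu == null) {
          throw new IOException(
              "profile must specify both memory-mb and vcores");
        }
        return new ResourceSpec(((Number) mem).longValue(),
            ((Number) cpu).intValue());
      }
    }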



hadoop git commit: YARN-6896. Federation: routing REST invocations transparently to multiple RMs (part 1 - basic execution). (Contributed by Giovanni Matteo Fumarola via curino)

2017-08-11 Thread curino
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0996acde6 -> cc59b5fb2


YARN-6896. Federation: routing REST invocations transparently to multiple RMs 
(part 1 - basic execution). (Contributed by Giovanni Matteo Fumarola via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc59b5fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc59b5fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc59b5fb

Branch: refs/heads/trunk
Commit: cc59b5fb26ccf58dffcd8850fa12ec65250f127d
Parents: 0996acd
Author: Carlo Curino 
Authored: Fri Aug 11 15:58:01 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 15:58:01 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   2 +
 .../webapp/DefaultRequestInterceptorREST.java   |  16 +-
 .../webapp/FederationInterceptorREST.java   | 750 +++
 .../webapp/BaseRouterWebServicesTest.java   |  37 +-
 .../MockDefaultRequestInterceptorREST.java  | 136 
 .../webapp/TestFederationInterceptorREST.java   | 379 ++
 .../TestFederationInterceptorRESTRetry.java | 274 +++
 .../TestableFederationInterceptorREST.java  |  54 ++
 .../src/site/markdown/Federation.md |   2 +-
 10 files changed, 1646 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc59b5fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cd4d569..8acaef8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2721,6 +2721,16 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.router.webapp."
   + "DefaultRequestInterceptorREST";
 
+  /**
+   * The interceptor class used in FederationInterceptorREST to communicate 
with
+   * each SubCluster.
+   */
+  public static final String ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS =
+  ROUTER_WEBAPP_PREFIX + "default-interceptor-class";
+  public static final String DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS =
+  "org.apache.hadoop.yarn.server.router.webapp."
+  + "DefaultRequestInterceptorREST";
+
   
   // Other Configs
   

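Usage sketch for the new key pair: both constants are defined in the hunk above, so selecting (or overriding) the per-SubCluster interceptor is a one-line configuration call.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RouterInterceptorConfigExample {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Per-SubCluster interceptor used by FederationInterceptorREST;
        // the patch defaults it to DefaultRequestInterceptorREST.
        conf.set(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS,
            YarnConfiguration.DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS);
        System.out.println(conf.get(
            YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS));
      }
    }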
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc59b5fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index b9ad31a..91a8b0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -81,6 +81,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.ROUTER_CLIENTRM_ADDRESS);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.ROUTER_RMADMIN_ADDRESS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS);
 
 // Federation policies configs to be ignored
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc59b5fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
index aa8e3eb..abd8ca6 100644
--- 

hadoop git commit: YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the diamond operator (Contributed by Larry Lo via Daniel Templeton)

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 11e367374 -> ce0cdc50d


YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the 
diamond operator
(Contributed by Larry Lo via Daniel Templeton)

(cherry picked from commit 0996acde6c325667aa19ae0740eb6b40bf4a682a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce0cdc50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce0cdc50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce0cdc50

Branch: refs/heads/branch-2
Commit: ce0cdc50d9700118139e7907dede274d2db93fda
Parents: 11e3673
Author: Daniel Templeton 
Authored: Fri Aug 11 14:50:46 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:51:42 2017 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce0cdc50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index a142d37..5696a97 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -267,7 +267,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 Map<FSQueueType, Set<String>> configuredQueues = new HashMap<>();
 
 for (FSQueueType queueType : FSQueueType.values()) {
-  configuredQueues.put(queueType, new HashSet<String>());
+  configuredQueues.put(queueType, new HashSet<>());
 }
 
 // Read and parse the allocations file.
@@ -281,7 +281,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   throw new AllocationConfigurationException("Bad fair scheduler config " +
   "file: top-level element not ");
 NodeList elements = root.getChildNodes();
-List<Element> queueElements = new ArrayList<Element>();
+List<Element> queueElements = new ArrayList<>();
 Element placementPolicyElement = null;
 for (int i = 0; i < elements.getLength(); i++) {
   Node node = elements.item(i);





hadoop git commit: YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the diamond operator (Contributed by Larry Lo via Daniel Templeton)

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 65364defb -> 0996acde6


YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the 
diamond operator
(Contributed by Larry Lo via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0996acde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0996acde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0996acde

Branch: refs/heads/trunk
Commit: 0996acde6c325667aa19ae0740eb6b40bf4a682a
Parents: 65364de
Author: Daniel Templeton 
Authored: Fri Aug 11 14:50:46 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:50:46 2017 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0996acde/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index bf5b4c5..313a27a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -266,7 +266,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 Map<FSQueueType, Set<String>> configuredQueues = new HashMap<>();
 
 for (FSQueueType queueType : FSQueueType.values()) {
-  configuredQueues.put(queueType, new HashSet<String>());
+  configuredQueues.put(queueType, new HashSet<>());
 }
 
 // Read and parse the allocations file.
@@ -280,7 +280,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   throw new AllocationConfigurationException("Bad fair scheduler config " +
   "file: top-level element not ");
 NodeList elements = root.getChildNodes();
-List<Element> queueElements = new ArrayList<Element>();
+List<Element> queueElements = new ArrayList<>();
 Element placementPolicyElement = null;
 for (int i = 0; i < elements.getLength(); i++) {
   Node node = elements.item(i);





hadoop git commit: YARN-6967. Limit application attempt's diagnostic message size thoroughly (Contributed by Chengbing Liu via Daniel Templeton)

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2566e5fcf -> 11e367374


YARN-6967. Limit application attempt's diagnostic message size thoroughly
(Contributed by Chengbing Liu via Daniel Templeton)

(cherry picked from commit 65364defb4a633ca20b39ebc38cd9c0db63a5835)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11e36737
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11e36737
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11e36737

Branch: refs/heads/branch-2
Commit: 11e3673748884c7f33fb9054679cf95f93dc7a32
Parents: 2566e5f
Author: Daniel Templeton 
Authored: Fri Aug 11 14:28:55 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:31:11 2017 -0700

--
 .../rmapp/attempt/RMAppAttemptImpl.java | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e36737/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 804c08c..d2fa214 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1317,7 +1317,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 // AFTER the initial saving on app-attempt-start
 // These fields can be visible from outside only after they are saved in
 // StateStore
-String diags = null;
+BoundedAppender diags = new BoundedAppender(diagnostics.limit);
 
 // don't leave the tracking URL pointing to a non-existent AM
 if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
@@ -1331,15 +1331,15 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 int exitStatus = ContainerExitStatus.INVALID;
 switch (event.getType()) {
 case LAUNCH_FAILED:
-  diags = event.getDiagnosticMsg();
+  diags.append(event.getDiagnosticMsg());
   break;
 case REGISTERED:
-  diags = getUnexpectedAMRegisteredDiagnostics();
+  diags.append(getUnexpectedAMRegisteredDiagnostics());
   break;
 case UNREGISTERED:
   RMAppAttemptUnregistrationEvent unregisterEvent =
   (RMAppAttemptUnregistrationEvent) event;
-  diags = unregisterEvent.getDiagnosticMsg();
+  diags.append(unregisterEvent.getDiagnosticMsg());
   // reset finalTrackingUrl to url sent by am
   finalTrackingUrl = 
sanitizeTrackingUrl(unregisterEvent.getFinalTrackingUrl());
   finalStatus = unregisterEvent.getFinalApplicationStatus();
@@ -1347,16 +1347,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 case CONTAINER_FINISHED:
   RMAppAttemptContainerFinishedEvent finishEvent =
   (RMAppAttemptContainerFinishedEvent) event;
-  diags = getAMContainerCrashedDiagnostics(finishEvent);
+  diags.append(getAMContainerCrashedDiagnostics(finishEvent));
   exitStatus = finishEvent.getContainerStatus().getExitStatus();
   break;
 case KILL:
   break;
 case FAIL:
-  diags = event.getDiagnosticMsg();
+  diags.append(event.getDiagnosticMsg());
   break;
 case EXPIRE:
-  diags = getAMExpiredDiagnostics(event);
+  diags.append(getAMExpiredDiagnostics(event));
   break;
 default:
   break;
@@ -1370,7 +1370,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 ApplicationAttemptStateData.newInstance(
 applicationAttemptId,  getMasterContainer(),
 rmStore.getCredentialsFromAppAttempt(this),
-startTime, stateToBeStored, finalTrackingUrl, diags,
+startTime, stateToBeStored, finalTrackingUrl, diags.toString(),
 finalStatus, exitStatus,
   getFinishTime(), resUsage.getMemorySeconds(),
   resUsage.getVcoreSeconds(),

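BoundedAppender is an existing RM helper that is not shown in this diff; the patch routes every diagnostic branch through it so the stored message can never exceed the configured limit. A minimal sketch of that behavior, assuming a keep-the-tail policy (the real helper also records that truncation happened, which is omitted here):

    // Minimal sketch in the spirit of the RM's BoundedAppender.
    final class BoundedDiagnostics {
      private final int limit;
      private final StringBuilder buf = new StringBuilder();

      BoundedDiagnostics(int limit) {
        this.limit = limit;
      }

      BoundedDiagnostics append(CharSequence msg) {
        if (msg != null) {
          buf.append(msg);
          if (buf.length() > limit) {
            // Keep only the most recent `limit` characters.
            buf.delete(0, buf.length() - limit);
          }
        }
        return this;
      }

      @Override
      public String toString() {
        return buf.toString();
      }
    }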


hadoop git commit: YARN-6967. Limit application attempt's diagnostic message size thoroughly (Contributed by Chengbing Liu via Daniel Templeton)

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk c7680d4cc -> 65364defb


YARN-6967. Limit application attempt's diagnostic message size thoroughly
(Contributed by Chengbing Liu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65364def
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65364def
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65364def

Branch: refs/heads/trunk
Commit: 65364defb4a633ca20b39ebc38cd9c0db63a5835
Parents: c7680d4
Author: Daniel Templeton 
Authored: Fri Aug 11 14:28:55 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:28:55 2017 -0700

--
 .../rmapp/attempt/RMAppAttemptImpl.java | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65364def/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 4210c54..254768b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1315,7 +1315,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 // AFTER the initial saving on app-attempt-start
 // These fields can be visible from outside only after they are saved in
 // StateStore
-String diags = null;
+BoundedAppender diags = new BoundedAppender(diagnostics.limit);
 
 // don't leave the tracking URL pointing to a non-existent AM
 if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
@@ -1329,15 +1329,15 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 int exitStatus = ContainerExitStatus.INVALID;
 switch (event.getType()) {
 case LAUNCH_FAILED:
-  diags = event.getDiagnosticMsg();
+  diags.append(event.getDiagnosticMsg());
   break;
 case REGISTERED:
-  diags = getUnexpectedAMRegisteredDiagnostics();
+  diags.append(getUnexpectedAMRegisteredDiagnostics());
   break;
 case UNREGISTERED:
   RMAppAttemptUnregistrationEvent unregisterEvent =
   (RMAppAttemptUnregistrationEvent) event;
-  diags = unregisterEvent.getDiagnosticMsg();
+  diags.append(unregisterEvent.getDiagnosticMsg());
   // reset finalTrackingUrl to url sent by am
   finalTrackingUrl = 
sanitizeTrackingUrl(unregisterEvent.getFinalTrackingUrl());
   finalStatus = unregisterEvent.getFinalApplicationStatus();
@@ -1345,16 +1345,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 case CONTAINER_FINISHED:
   RMAppAttemptContainerFinishedEvent finishEvent =
   (RMAppAttemptContainerFinishedEvent) event;
-  diags = getAMContainerCrashedDiagnostics(finishEvent);
+  diags.append(getAMContainerCrashedDiagnostics(finishEvent));
   exitStatus = finishEvent.getContainerStatus().getExitStatus();
   break;
 case KILL:
   break;
 case FAIL:
-  diags = event.getDiagnosticMsg();
+  diags.append(event.getDiagnosticMsg());
   break;
 case EXPIRE:
-  diags = getAMExpiredDiagnostics(event);
+  diags.append(getAMExpiredDiagnostics(event));
   break;
 default:
   break;
@@ -1368,7 +1368,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 ApplicationAttemptStateData.newInstance(
 applicationAttemptId,  getMasterContainer(),
 rmStore.getCredentialsFromAppAttempt(this),
-startTime, stateToBeStored, finalTrackingUrl, diags,
+startTime, stateToBeStored, finalTrackingUrl, diags.toString(),
 finalStatus, exitStatus,
   getFinishTime(), resUsage.getMemorySeconds(),
   resUsage.getVcoreSeconds(),




hadoop git commit: YARN-6884. AllocationFileLoaderService.loadQueue() has an if without braces (Contributed by weiyuan via Daniel Templeton)

2017-08-11 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f6c74ac5e -> 2566e5fcf


YARN-6884. AllocationFileLoaderService.loadQueue() has an if without braces
(Contributed by weiyuan via Daniel Templeton)

(cherry picked from commit c7680d4cc4d9302a5b5efcf2467bd32ecea99585)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2566e5fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2566e5fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2566e5fc

Branch: refs/heads/branch-2
Commit: 2566e5fcf7b4126ec41722fce461b34e5dbf8fc6
Parents: f6c74ac
Author: Daniel Templeton 
Authored: Fri Aug 11 14:22:02 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:23:17 2017 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java| 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2566e5fc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 163a265..a142d37 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -295,8 +295,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
   NodeList fields = element.getChildNodes();
   for (int j = 0; j < fields.getLength(); j++) {
 Node fieldNode = fields.item(j);
-if (!(fieldNode instanceof Element))
+if (!(fieldNode instanceof Element)) {
   continue;
+}
 Element field = (Element) fieldNode;
 if ("maxRunningApps".equals(field.getTagName())) {
   String text = ((Text)field.getFirstChild()).getData().trim();
@@ -491,8 +492,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
 
 for (int j = 0; j < fields.getLength(); j++) {
   Node fieldNode = fields.item(j);
-  if (!(fieldNode instanceof Element))
+  if (!(fieldNode instanceof Element)) {
 continue;
+  }
   Element field = (Element) fieldNode;
   if ("minResources".equals(field.getTagName())) {
 String text = ((Text)field.getFirstChild()).getData().trim();





hadoop git commit: HADOOP-14741. Refactor curator based ZooKeeper communication into common library. (Íñigo Goiri via Subru).

2017-08-11 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8c4b6d16a -> bbbf0e2a4


HADOOP-14741. Refactor curator based ZooKeeper communication into common 
library. (Íñigo Goiri via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbbf0e2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbbf0e2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbbf0e2a

Branch: refs/heads/trunk
Commit: bbbf0e2a4136b30cad9dfd36ef138650a1adea60
Parents: 8c4b6d1
Author: Subru Krishnan 
Authored: Fri Aug 11 13:58:45 2017 -0700
Committer: Subru Krishnan 
Committed: Fri Aug 11 13:58:45 2017 -0700

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  21 ++
 .../hadoop/util/curator/ZKCuratorManager.java   | 294 +++
 .../hadoop/util/curator/package-info.java   |  27 ++
 .../src/main/resources/core-default.xml |  46 +++
 .../util/curator/TestZKCuratorManager.java  |  95 ++
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |   9 +
 .../src/main/resources/yarn-default.xml |  53 
 ...ActiveStandbyElectorBasedElectorService.java |   5 +-
 .../yarn/server/resourcemanager/RMZKUtils.java  |  81 -
 .../server/resourcemanager/ResourceManager.java |  83 +++---
 .../recovery/ZKRMStateStore.java|  38 ++-
 .../server/resourcemanager/RMHATestBase.java|   5 +-
 13 files changed, 567 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbbf0e2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index e53f71e..0da4bbd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -377,4 +377,25 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
 
   // HDFS client HTrace configuration.
   public static final String  FS_CLIENT_HTRACE_PREFIX = "fs.client.htrace.";
+
+  // Global ZooKeeper configuration keys
+  public static final String ZK_PREFIX = "hadoop.zk.";
+  /** ACL for the ZooKeeper ensemble. */
+  public static final String ZK_ACL = ZK_PREFIX + "acl";
+  public static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";
+  /** Authentication for the ZooKeeper ensemble. */
+  public static final String ZK_AUTH = ZK_PREFIX + "auth";
+
+  /** Address of the ZooKeeper ensemble. */
+  public static final String ZK_ADDRESS = ZK_PREFIX + "address";
+  /** Maximum number of retries for a ZooKeeper operation. */
+  public static final String ZK_NUM_RETRIES = ZK_PREFIX + "num-retries";
+  public static final int ZK_NUM_RETRIES_DEFAULT = 1000;
+  /** Timeout for a ZooKeeper operation in ZooKeeper in milliseconds. */
+  public static final String ZK_TIMEOUT_MS = ZK_PREFIX + "timeout-ms";
+  public static final int ZK_TIMEOUT_MS_DEFAULT = 1;
+  /** How often to retry a ZooKeeper operation in milliseconds. */
+  public static final String ZK_RETRY_INTERVAL_MS =
+  ZK_PREFIX + "retry-interval-ms";
+  public static final int ZK_RETRY_INTERVAL_MS_DEFAULT = 1000;
 }

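A sketch of how these keys feed a Curator client, roughly what the new ZKCuratorManager encapsulates (the builder calls are Curator's real API; the helper shape here is illustrative, not the class's actual methods):

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.RetryNTimes;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class ZkClientFromConf {
      public static CuratorFramework start(Configuration conf) {
        String ensemble = conf.get(CommonConfigurationKeys.ZK_ADDRESS);
        int numRetries = conf.getInt(CommonConfigurationKeys.ZK_NUM_RETRIES,
            CommonConfigurationKeys.ZK_NUM_RETRIES_DEFAULT);
        int retryIntervalMs =
            conf.getInt(CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS,
                CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS_DEFAULT);
        CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(ensemble)
            .retryPolicy(new RetryNTimes(numRetries, retryIntervalMs))
            .build();
        client.start();
        return client;
      }
    }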
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbbf0e2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
new file mode 100644
index 000..3adf028
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
@@ -0,0 +1,294 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed 

hadoop git commit: YARN-5927. BaseContainerManagerTest::waitForNMContainerState timeout accounting is not accurate. (Kai Sasaki via kasha)

2017-08-11 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e208a4e7e -> f6c74ac5e


YARN-5927. BaseContainerManagerTest::waitForNMContainerState timeout accounting 
is not accurate. (Kai Sasaki via kasha)

(cherry picked from commit 8c4b6d16a526610a03ccc85665744ad071e37400)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6c74ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6c74ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6c74ac5

Branch: refs/heads/branch-2
Commit: f6c74ac5eac998192d0cc230fe9785820dfe1939
Parents: e208a4e
Author: Karthik Kambatla 
Authored: Fri Aug 11 12:14:06 2017 -0700
Committer: Karthik Kambatla 
Committed: Fri Aug 11 12:18:29 2017 -0700

--
 .../containermanager/BaseContainerManagerTest.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6c74ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 01af521..694f126 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -310,13 +310,13 @@ public abstract class BaseContainerManagerTest {
 new HashSet<>(finalStates);
 int timeoutSecs = 0;
 do {
-  Thread.sleep(2000);
+  Thread.sleep(1000);
   containerStatus =
   containerManager.getContainerStatuses(request)
   .getContainerStatuses().get(0);
   LOG.info("Waiting for container to get into one of states " + fStates
   + ". Current state is " + containerStatus.getState());
-  timeoutSecs += 2;
+  timeoutSecs += 1;
 } while (!fStates.contains(containerStatus.getState())
 && timeoutSecs < timeOutMax);
 LOG.info("Container state is " + containerStatus.getState());
@@ -371,7 +371,7 @@ public abstract class BaseContainerManagerTest {
 .containermanager.container.ContainerState currentState = null;
 int timeoutSecs = 0;
 do {
-  Thread.sleep(2000);
+  Thread.sleep(1000);
   container =
   containerManager.getContext().getContainers().get(containerID);
   if (container != null) {
@@ -381,9 +381,9 @@ public abstract class BaseContainerManagerTest {
 LOG.info("Waiting for NM container to get into one of the following " +
 "states: " + finalStates + ". Current state is " + currentState);
   }
-  timeoutSecs += 2;
+  timeoutSecs += 1;
 } while (!finalStates.contains(currentState)
-&& timeoutSecs++ < timeOutMax);
+&& timeoutSecs < timeOutMax);
 LOG.info("Container state is " + currentState);
 Assert.assertTrue("ContainerState is not correct (timedout)",
 finalStates.contains(currentState));

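The bug generalized: bumping a counter by the intended sleep interval (and, in one branch, incrementing it twice per iteration) drifts from wall-clock time. A deadline-based loop sidesteps the bookkeeping entirely; a sketch, not the test's actual helper:

    import java.util.function.Supplier;

    final class WaitFor {
      // Poll until the condition holds or timeoutMs of wall-clock time
      // passes; deadline-based, so sleep jitter cannot skew the budget.
      static boolean waitFor(Supplier<Boolean> condition, long timeoutMs,
          long pollMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (condition.get()) {
            return true;
          }
          Thread.sleep(pollMs);
        }
        return condition.get();
      }
    }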




hadoop git commit: YARN-5927. BaseContainerManagerTest::waitForNMContainerState timeout accounting is not accurate. (Kai Sasaki via kasha)

2017-08-11 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 07fff43f4 -> 8c4b6d16a


YARN-5927. BaseContainerManagerTest::waitForNMContainerState timeout accounting 
is not accurate. (Kai Sasaki via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c4b6d16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c4b6d16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c4b6d16

Branch: refs/heads/trunk
Commit: 8c4b6d16a526610a03ccc85665744ad071e37400
Parents: 07fff43
Author: Karthik Kambatla 
Authored: Fri Aug 11 12:14:06 2017 -0700
Committer: Karthik Kambatla 
Committed: Fri Aug 11 12:15:43 2017 -0700

--
 .../containermanager/BaseContainerManagerTest.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c4b6d16/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 7980a80..d266ac1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -310,13 +310,13 @@ public abstract class BaseContainerManagerTest {
 new HashSet<>(finalStates);
 int timeoutSecs = 0;
 do {
-  Thread.sleep(2000);
+  Thread.sleep(1000);
   containerStatus =
   containerManager.getContainerStatuses(request)
   .getContainerStatuses().get(0);
   LOG.info("Waiting for container to get into one of states " + fStates
   + ". Current state is " + containerStatus.getState());
-  timeoutSecs += 2;
+  timeoutSecs += 1;
 } while (!fStates.contains(containerStatus.getState())
 && timeoutSecs < timeOutMax);
 LOG.info("Container state is " + containerStatus.getState());
@@ -371,7 +371,7 @@ public abstract class BaseContainerManagerTest {
 .containermanager.container.ContainerState currentState = null;
 int timeoutSecs = 0;
 do {
-  Thread.sleep(2000);
+  Thread.sleep(1000);
   container =
   containerManager.getContext().getContainers().get(containerID);
   if (container != null) {
@@ -381,9 +381,9 @@ public abstract class BaseContainerManagerTest {
 LOG.info("Waiting for NM container to get into one of the following " +
 "states: " + finalStates + ". Current state is " + currentState);
   }
-  timeoutSecs += 2;
+  timeoutSecs += 1;
 } while (!finalStates.contains(currentState)
-&& timeoutSecs++ < timeOutMax);
+&& timeoutSecs < timeOutMax);
 LOG.info("Container state is " + currentState);
 Assert.assertTrue("ContainerState is not correct (timedout)",
 finalStates.contains(currentState));





hadoop git commit: HADOOP-14760. Add missing override to LoadBalancingKMSClientProvider.

2017-08-11 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 582648bef -> 07fff43f4


HADOOP-14760. Add missing override to LoadBalancingKMSClientProvider.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07fff43f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07fff43f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07fff43f

Branch: refs/heads/trunk
Commit: 07fff43f4a1e724c83ff8fcc90fac64aa04a39eb
Parents: 582648b
Author: Xiao Chen 
Authored: Fri Aug 11 11:41:16 2017 -0700
Committer: Xiao Chen 
Committed: Fri Aug 11 11:41:41 2017 -0700

--
 .../hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07fff43f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 6b20c99..6e010b1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -292,7 +292,9 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 }
   }
 
-  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+  @Override
+  public EncryptedKeyVersion reencryptEncryptedKey(
+  final EncryptedKeyVersion ekv)
   throws IOException, GeneralSecurityException {
 try {
   return doOp(new ProviderCallable<EncryptedKeyVersion>() {
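
The substantive change is the @Override annotation on reencryptEncryptedKey
(plus a line re-wrap): the compiler will now reject the method if its
signature ever drifts away from the supertype method it is meant to
override. A generic illustration of the failure mode the annotation guards
against, using hypothetical types rather than the KMS classes:

interface CryptoOps {
  String reencrypt(String ekv);
}

class OpsImpl implements CryptoOps {
  // Without @Override, a mistyped name or signature can silently become an
  // unrelated method (when the supertype method has a default body or
  // lives in a superclass); with it, any mismatch is a compile-time error.
  @Override
  public String reencrypt(String ekv) {
    return ekv;
  }
}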





[3/3] hadoop git commit: HADOOP-14260. Configuration.dumpConfiguration should redact sensitive information. Contributed by John Zhuge.

2017-08-11 Thread jzhuge
HADOOP-14260. Configuration.dumpConfiguration should redact sensitive 
information. Contributed by John Zhuge.

(cherry picked from commit 582648befaf9908159f937d2cc8f549583a3483e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9677445
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9677445
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9677445

Branch: refs/heads/branch-2.8
Commit: e96774455819978ff36c471b82c94475ab1cb741
Parents: 407bbe3
Author: John Zhuge 
Authored: Fri Aug 11 11:02:18 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 11:02:18 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 15 +++---
 .../apache/hadoop/conf/TestConfiguration.java   | 48 ++--
 2 files changed, 53 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9677445/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index cf02070..b9f4f2a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2952,7 +2952,8 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
   JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
   dumpGenerator.writeStartObject();
   dumpGenerator.writeFieldName("property");
-  appendJSONProperty(dumpGenerator, config, propertyName);
+  appendJSONProperty(dumpGenerator, config, propertyName,
+  new ConfigRedactor(config));
   dumpGenerator.writeEndObject();
   dumpGenerator.flush();
 }
@@ -2992,11 +2993,11 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 dumpGenerator.writeFieldName("properties");
 dumpGenerator.writeStartArray();
 dumpGenerator.flush();
+ConfigRedactor redactor = new ConfigRedactor(config);
 synchronized (config) {
   for (Map.Entry<Object,Object> item: config.getProps().entrySet()) {
-appendJSONProperty(dumpGenerator,
-config,
-item.getKey().toString());
+appendJSONProperty(dumpGenerator, config, item.getKey().toString(),
+redactor);
   }
 }
 dumpGenerator.writeEndArray();
@@ -3014,12 +3015,14 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
* @throws IOException
*/
   private static void appendJSONProperty(JsonGenerator jsonGen,
-  Configuration config, String name) throws IOException {
+  Configuration config, String name, ConfigRedactor redactor)
+  throws IOException {
 // skip writing if given property name is empty or null
 if(!Strings.isNullOrEmpty(name) && jsonGen != null) {
   jsonGen.writeStartObject();
   jsonGen.writeStringField("key", name);
-  jsonGen.writeStringField("value", config.get(name));
+  jsonGen.writeStringField("value",
+  redactor.redact(name, config.get(name)));
   jsonGen.writeBooleanField("isFinal",
   config.finalParameters.contains(name));
   String[] resources = config.updatingResource.get(name);
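
The shape of the change is worth noting: one ConfigRedactor is built per
dump and threaded through appendJSONProperty, so every value passes through
redactor.redact(key, value) before being written, and the sensitive-key
patterns (configured via hadoop.security.sensitive-config-keys) are parsed
only once rather than per property. A minimal sketch of the same
redact-before-dump pattern; SimpleRedactor is a stand-in, not the Hadoop
ConfigRedactor:

import java.util.List;

final class SimpleRedactor {
  private static final String REDACTED = "<redacted>";
  private final List<String> sensitiveFragments;

  SimpleRedactor(List<String> sensitiveFragments) {
    this.sensitiveFragments = sensitiveFragments;
  }

  /** Return the value unchanged unless the key looks sensitive. */
  String redact(String key, String value) {
    for (String fragment : sensitiveFragments) {
      if (key.contains(fragment)) {
        return REDACTED;   // never emit the real secret
      }
    }
    return value;
  }
}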

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9677445/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 4d0c705..54e3613 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -45,6 +45,7 @@ import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -73,6 +74,11 @@ public class TestConfiguration extends TestCase {
   /** Four apostrophes. */
  public static final String ESCAPED = "&#39;&#39;&#39;&#39;";
 
+  private static final String SENSITIVE_CONFIG_KEYS =
+  CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
+
+ 

hadoop git commit: YARN-6820. Restrict read access to timelineservice v2 data. Contributed by Vrushali C

2017-08-11 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 7b2cb0614 -> ee5d80d3b


YARN-6820. Restrict read access to timelineservice v2 data. Contributed by 
Vrushali C


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee5d80d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee5d80d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee5d80d3

Branch: refs/heads/YARN-5355
Commit: ee5d80d3b1177b8e058b6c3629b17dfb576c8fb0
Parents: 7b2cb06
Author: Jason Lowe 
Authored: Fri Aug 11 13:05:05 2017 -0500
Committer: Jason Lowe 
Committed: Fri Aug 11 13:05:05 2017 -0500

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  31 ++
 .../reader/TimelineReaderServer.java|   5 +
 .../reader/TimelineReaderWebServicesUtils.java  |  29 +-
 ...elineReaderWhitelistAuthorizationFilter.java | 123 ++
 ...WhitelistAuthorizationFilterInitializer.java |  66 
 ...elineReaderWhitelistAuthorizationFilter.java | 380 +++
 6 files changed, 630 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee5d80d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b8cc4fd..f01a0f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2109,6 +2109,37 @@ public class YarnConfiguration extends Configuration {
   + "hbase.configuration.file";
 
   /**
+   * The name for the setting that enables or disables authentication checks
+   * for reading timeline service v2 data.
+   */
+  public static final String TIMELINE_SERVICE_READ_AUTH_ENABLED =
+  TIMELINE_SERVICE_PREFIX + "read.authentication.enabled";
+
+  /**
+   * The default setting for authentication checks for reading timeline
+   * service v2 data.
+   */
+  public static final Boolean DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED =
+  false;
+
+  /**
+   * The name for the setting that lists the users and groups who are allowed
+   * to read timeline service v2 data. It is a comma-separated list of
+   * users, followed by a space, then a comma-separated list of groups.
+   * It will allow this list of users and groups to read the data
+   * and reject everyone else.
+   */
+  public static final String TIMELINE_SERVICE_READ_ALLOWED_USERS =
+  TIMELINE_SERVICE_PREFIX + "read.allowed.users";
+
+  /**
+   * The default value for list of the users who are allowed to read
+   * timeline service v2 data.
+   */
+  public static final String DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS =
+  "";
+
+  /**
* The setting that controls how long the final value of a metric of a
* completed app is retained before merging into the flow sum. Up to this 
time
* after an application is completed out-of-order values that arrive can be
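
Per the javadoc above, yarn.timeline-service.read.allowed.users follows the
usual YARN ACL shape: a comma-separated user list, a space, then a
comma-separated group list, e.g. "user1,user2 group1,group2". A hedged
sketch of parsing and checking that shape; the real
TimelineReaderWhitelistAuthorizationFilter is expected to lean on Hadoop's
AccessControlList rather than hand-rolled parsing like this:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

final class AclSketch {
  private final List<String> users;
  private final List<String> groups;

  /** Parse the "user1,user2 group1,group2" shape described above. */
  AclSketch(String acl) {
    String[] parts = acl.trim().split("\\s+", 2);
    this.users = Arrays.asList(parts[0].split(","));
    this.groups = parts.length > 1
        ? Arrays.asList(parts[1].split(","))
        : Collections.<String>emptyList();
  }

  boolean allows(String user, List<String> userGroups) {
    if (users.contains(user)) {
      return true;
    }
    for (String g : userGroups) {
      if (groups.contains(g)) {
        return true;
      }
    }
    return false;   // everyone not listed is rejected, per the javadoc
  }
}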

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee5d80d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 1d5d6e2..0409356 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderAuthenticationFilterInitializer;
+import 

[1/3] hadoop git commit: HADOOP-13588. ConfServlet should respect Accept request header. Contributed by Weiwei Yang

2017-08-11 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a54c3437a -> e96774455


HADOOP-13588. ConfServlet should respect Accept request header. Contributed by 
Weiwei Yang

(cherry picked from commit 59d59667a8b1d3fb4a744a41774b2397fd91cbb3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fdcf86e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fdcf86e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fdcf86e

Branch: refs/heads/branch-2.8
Commit: 8fdcf86e6468a7a48a28554136f2cb0da2587ccf
Parents: a54c343
Author: John Zhuge 
Authored: Fri Aug 11 11:02:09 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 11:02:09 2017 -0700

--
 .../org/apache/hadoop/conf/ConfServlet.java | 21 ++---
 .../org/apache/hadoop/conf/TestConfServlet.java | 24 
 2 files changed, 37 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fdcf86e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index d4b34e9..f9a2535 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -25,11 +25,14 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.HttpServer2;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * A servlet to print out the running configuration data.
  */
@@ -38,9 +41,8 @@ import org.apache.hadoop.http.HttpServer2;
 public class ConfServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
 
-  private static final String FORMAT_JSON = "json";
-  private static final String FORMAT_XML = "xml";
-  private static final String FORMAT_PARAM = "format";
+  protected static final String FORMAT_JSON = "json";
+  protected static final String FORMAT_XML = "xml";
 
   /**
* Return the Configuration of the daemon hosting this servlet.
@@ -67,11 +69,7 @@ public class ConfServlet extends HttpServlet {
   return;
 }
 
-String format = request.getParameter(FORMAT_PARAM);
-if (null == format) {
-  format = FORMAT_XML;
-}
-
+String format = parseAcceptHeader(request);
 if (FORMAT_XML.equals(format)) {
   response.setContentType("text/xml; charset=utf-8");
 } else if (FORMAT_JSON.equals(format)) {
@@ -87,6 +85,13 @@ public class ConfServlet extends HttpServlet {
 out.close();
   }
 
+  @VisibleForTesting
+  static String parseAcceptHeader(HttpServletRequest request) {
+String format = request.getHeader(HttpHeaders.ACCEPT);
+return format != null && format.contains(FORMAT_JSON) ?
+FORMAT_JSON : FORMAT_XML;
+  }
+
   /**
* Guts of the servlet - extracted for easy testing.
*/
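
With this change the response format comes from content negotiation rather
than the old ?format= query parameter: an Accept header containing "json"
yields JSON, anything else (including no header at all) falls back to XML.
A hedged client-side sketch exercising both paths; the host and port are
placeholders for whichever daemon serves /conf:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ConfFetchSketch {
  public static void main(String[] args) throws Exception {
    for (String accept : new String[] {"application/json", "text/xml"}) {
      HttpURLConnection conn = (HttpURLConnection)
          new URL("http://localhost:50070/conf").openConnection();
      conn.setRequestProperty("Accept", accept);
      System.out.println(accept + " -> " + conn.getContentType());
      try (BufferedReader r = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
        System.out.println(r.readLine());  // first line of the dump
      }
    }
  }
}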

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fdcf86e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
index 1c22ee6..73d4992 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
@@ -19,7 +19,11 @@ package org.apache.hadoop.conf;
 
 import java.io.StringWriter;
 import java.io.StringReader;
+import java.util.HashMap;
 import java.util.Map;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.core.HttpHeaders;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
@@ -32,6 +36,7 @@ import org.xml.sax.InputSource;
 
 import junit.framework.TestCase;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Basic test case that the ConfServlet can write configuration
@@ -48,6 +53,25 @@ public class TestConfServlet extends TestCase {
   }
 
   @Test
+  public void testParseHeaders() throws Exception {
+HashMap 

[2/3] hadoop git commit: HADOOP-13628. Support to retrieve specific property from configuration via REST API. Contributed by Weiwei Yang

2017-08-11 Thread jzhuge
HADOOP-13628. Support to retrieve specific property from configuration via REST 
API. Contributed by Weiwei Yang

(cherry picked from commit 00160f71b6d98244fcb1cb58b2db9fc24f1cd672)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/407bbe39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/407bbe39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/407bbe39

Branch: refs/heads/branch-2.8
Commit: 407bbe39facf308a6c954e5b901860eacdfd4707
Parents: 8fdcf86
Author: John Zhuge 
Authored: Fri Aug 11 11:02:14 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 11:02:14 2017 -0700

--
 .../org/apache/hadoop/conf/ConfServlet.java |  19 +-
 .../org/apache/hadoop/conf/Configuration.java   | 284 +++
 .../org/apache/hadoop/conf/TestConfServlet.java | 122 +++-
 .../apache/hadoop/conf/TestConfiguration.java   | 139 -
 4 files changed, 491 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/407bbe39/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index f9a2535..cfd7b97 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -76,11 +76,14 @@ public class ConfServlet extends HttpServlet {
   response.setContentType("application/json; charset=utf-8");
 }
 
+String name = request.getParameter("name");
 Writer out = response.getWriter();
 try {
-  writeResponse(getConfFromContext(), out, format);
+  writeResponse(getConfFromContext(), out, format, name);
 } catch (BadFormatException bfe) {
   response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
+} catch (IllegalArgumentException iae) {
+  response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
 }
 out.close();
   }
@@ -95,17 +98,23 @@ public class ConfServlet extends HttpServlet {
   /**
* Guts of the servlet - extracted for easy testing.
*/
-  static void writeResponse(Configuration conf, Writer out, String format)
-throws IOException, BadFormatException {
+  static void writeResponse(Configuration conf,
+  Writer out, String format, String propertyName)
+  throws IOException, IllegalArgumentException, BadFormatException {
 if (FORMAT_JSON.equals(format)) {
-  Configuration.dumpConfiguration(conf, out);
+  Configuration.dumpConfiguration(conf, propertyName, out);
 } else if (FORMAT_XML.equals(format)) {
-  conf.writeXml(out);
+  conf.writeXml(propertyName, out);
 } else {
   throw new BadFormatException("Bad format: " + format);
 }
   }
 
+  static void writeResponse(Configuration conf, Writer out, String format)
+  throws IOException, BadFormatException {
+writeResponse(conf, out, format, null);
+  }
+
   public static class BadFormatException extends Exception {
 private static final long serialVersionUID = 1L;
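
Two details of the new API surface: the optional name query parameter is
forwarded as propertyName (null keeps the old dump-everything behavior, and
a name that does not resolve is reported as IllegalArgumentException and
mapped to HTTP 404), while the old three-argument writeResponse survives as
an overload that simply forwards null. A hedged sketch of fetching a single
property; host, port and the property key are placeholders:

import java.net.HttpURLConnection;
import java.net.URL;

public class SinglePropertySketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:50070/conf?name=fs.defaultFS");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    // 200 with a one-property document, or 404 if the name is unknown.
    System.out.println("HTTP " + conn.getResponseCode());
  }
}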
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/407bbe39/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index a746119..cf02070 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -101,8 +101,9 @@ import org.w3c.dom.Text;
 import org.xml.sax.SAXException;
 
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 
-/** 
+/**
  * Provides access to configuration parameters.
  *
  * Resources
@@ -2756,14 +2757,37 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 writeXml(new OutputStreamWriter(out, "UTF-8"));
   }
 
-  /** 
-   * Write out the non-default properties in this configuration to the given
-   * {@link Writer}.
-   * 
+  public void writeXml(Writer out) throws IOException {
+writeXml(null, out);
+  }
+
+  /**
+   * Write out the non-default properties in this configuration to the
+   * given {@link Writer}.
+   *
+   * 
+   * When property name is not empty and the property exists in the
+ 

[2/3] hadoop git commit: HADOOP-13628. Support to retrieve specific property from configuration via REST API. Contributed by Weiwei Yang

2017-08-11 Thread jzhuge
HADOOP-13628. Support to retrieve specific property from configuration via REST 
API. Contributed by Weiwei Yang

(cherry picked from commit 00160f71b6d98244fcb1cb58b2db9fc24f1cd672)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76c4aee2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76c4aee2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76c4aee2

Branch: refs/heads/branch-2
Commit: 76c4aee2e3fd66750df94efd4e5b7504a6c1e6fa
Parents: 9ba3268
Author: John Zhuge 
Authored: Fri Aug 11 10:54:30 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 10:54:30 2017 -0700

--
 .../org/apache/hadoop/conf/ConfServlet.java |  19 +-
 .../org/apache/hadoop/conf/Configuration.java   | 284 +++
 .../org/apache/hadoop/conf/TestConfServlet.java | 122 +++-
 .../apache/hadoop/conf/TestConfiguration.java   | 139 -
 4 files changed, 491 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c4aee2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index f9a2535..cfd7b97 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -76,11 +76,14 @@ public class ConfServlet extends HttpServlet {
   response.setContentType("application/json; charset=utf-8");
 }
 
+String name = request.getParameter("name");
 Writer out = response.getWriter();
 try {
-  writeResponse(getConfFromContext(), out, format);
+  writeResponse(getConfFromContext(), out, format, name);
 } catch (BadFormatException bfe) {
   response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
+} catch (IllegalArgumentException iae) {
+  response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
 }
 out.close();
   }
@@ -95,17 +98,23 @@ public class ConfServlet extends HttpServlet {
   /**
* Guts of the servlet - extracted for easy testing.
*/
-  static void writeResponse(Configuration conf, Writer out, String format)
-throws IOException, BadFormatException {
+  static void writeResponse(Configuration conf,
+  Writer out, String format, String propertyName)
+  throws IOException, IllegalArgumentException, BadFormatException {
 if (FORMAT_JSON.equals(format)) {
-  Configuration.dumpConfiguration(conf, out);
+  Configuration.dumpConfiguration(conf, propertyName, out);
 } else if (FORMAT_XML.equals(format)) {
-  conf.writeXml(out);
+  conf.writeXml(propertyName, out);
 } else {
   throw new BadFormatException("Bad format: " + format);
 }
   }
 
+  static void writeResponse(Configuration conf, Writer out, String format)
+  throws IOException, BadFormatException {
+writeResponse(conf, out, format, null);
+  }
+
   public static class BadFormatException extends Exception {
 private static final long serialVersionUID = 1L;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c4aee2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 40509c1..89714bf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -103,8 +103,9 @@ import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 
-/** 
+/**
  * Provides access to configuration parameters.
  *
  * Resources
@@ -2866,14 +2867,37 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 writeXml(new OutputStreamWriter(out, "UTF-8"));
   }
 
-  /** 
-   * Write out the non-default properties in this configuration to the given
-   * {@link Writer}.
-   * 
+  public void writeXml(Writer out) throws IOException {
+writeXml(null, out);
+  }
+
+  /**
+   * Write out the non-default properties in this configuration to the
+   * given {@link Writer}.
+   *
+   * 
+   * When property name is not empty and the property exists in the
+   

[1/3] hadoop git commit: HADOOP-13588. ConfServlet should respect Accept request header. Contributed by Weiwei Yang

2017-08-11 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9c6e8ece9 -> e208a4e7e


HADOOP-13588. ConfServlet should respect Accept request header. Contributed by 
Weiwei Yang

(cherry picked from commit 59d59667a8b1d3fb4a744a41774b2397fd91cbb3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ba3268f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ba3268f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ba3268f

Branch: refs/heads/branch-2
Commit: 9ba3268fa3ad4fec5222361058a2086d2cb8c3e0
Parents: 9c6e8ec
Author: John Zhuge 
Authored: Fri Aug 11 10:54:25 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 10:54:25 2017 -0700

--
 .../org/apache/hadoop/conf/ConfServlet.java | 21 ++---
 .../org/apache/hadoop/conf/TestConfServlet.java | 24 
 2 files changed, 37 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ba3268f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index d4b34e9..f9a2535 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -25,11 +25,14 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.HttpServer2;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * A servlet to print out the running configuration data.
  */
@@ -38,9 +41,8 @@ import org.apache.hadoop.http.HttpServer2;
 public class ConfServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
 
-  private static final String FORMAT_JSON = "json";
-  private static final String FORMAT_XML = "xml";
-  private static final String FORMAT_PARAM = "format";
+  protected static final String FORMAT_JSON = "json";
+  protected static final String FORMAT_XML = "xml";
 
   /**
* Return the Configuration of the daemon hosting this servlet.
@@ -67,11 +69,7 @@ public class ConfServlet extends HttpServlet {
   return;
 }
 
-String format = request.getParameter(FORMAT_PARAM);
-if (null == format) {
-  format = FORMAT_XML;
-}
-
+String format = parseAcceptHeader(request);
 if (FORMAT_XML.equals(format)) {
   response.setContentType("text/xml; charset=utf-8");
 } else if (FORMAT_JSON.equals(format)) {
@@ -87,6 +85,13 @@ public class ConfServlet extends HttpServlet {
 out.close();
   }
 
+  @VisibleForTesting
+  static String parseAcceptHeader(HttpServletRequest request) {
+String format = request.getHeader(HttpHeaders.ACCEPT);
+return format != null && format.contains(FORMAT_JSON) ?
+FORMAT_JSON : FORMAT_XML;
+  }
+
   /**
* Guts of the servlet - extracted for easy testing.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ba3268f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
index 1c22ee6..73d4992 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
@@ -19,7 +19,11 @@ package org.apache.hadoop.conf;
 
 import java.io.StringWriter;
 import java.io.StringReader;
+import java.util.HashMap;
 import java.util.Map;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.core.HttpHeaders;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
@@ -32,6 +36,7 @@ import org.xml.sax.InputSource;
 
 import junit.framework.TestCase;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Basic test case that the ConfServlet can write configuration
@@ -48,6 +53,25 @@ public class TestConfServlet extends TestCase {
   }
 
   @Test
+  public void testParseHeaders() throws Exception {
+HashMap 

[3/3] hadoop git commit: HADOOP-14260. Configuration.dumpConfiguration should redact sensitive information. Contributed by John Zhuge.

2017-08-11 Thread jzhuge
HADOOP-14260. Configuration.dumpConfiguration should redact sensitive 
information. Contributed by John Zhuge.

(cherry picked from commit 582648befaf9908159f937d2cc8f549583a3483e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e208a4e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e208a4e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e208a4e7

Branch: refs/heads/branch-2
Commit: e208a4e7ee5725b910a7e799066000cacf855218
Parents: 76c4aee
Author: John Zhuge 
Authored: Fri Aug 11 10:54:41 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 10:54:41 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 15 +++---
 .../apache/hadoop/conf/TestConfiguration.java   | 48 ++--
 2 files changed, 53 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e208a4e7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 89714bf..42aa8f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3062,7 +3062,8 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
   JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
   dumpGenerator.writeStartObject();
   dumpGenerator.writeFieldName("property");
-  appendJSONProperty(dumpGenerator, config, propertyName);
+  appendJSONProperty(dumpGenerator, config, propertyName,
+  new ConfigRedactor(config));
   dumpGenerator.writeEndObject();
   dumpGenerator.flush();
 }
@@ -3102,11 +3103,11 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 dumpGenerator.writeFieldName("properties");
 dumpGenerator.writeStartArray();
 dumpGenerator.flush();
+ConfigRedactor redactor = new ConfigRedactor(config);
 synchronized (config) {
   for (Map.Entry<Object,Object> item: config.getProps().entrySet()) {
-appendJSONProperty(dumpGenerator,
-config,
-item.getKey().toString());
+appendJSONProperty(dumpGenerator, config, item.getKey().toString(),
+redactor);
   }
 }
 dumpGenerator.writeEndArray();
@@ -3124,12 +3125,14 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
* @throws IOException
*/
   private static void appendJSONProperty(JsonGenerator jsonGen,
-  Configuration config, String name) throws IOException {
+  Configuration config, String name, ConfigRedactor redactor)
+  throws IOException {
 // skip writing if given property name is empty or null
 if(!Strings.isNullOrEmpty(name) && jsonGen != null) {
   jsonGen.writeStartObject();
   jsonGen.writeStringField("key", name);
-  jsonGen.writeStringField("value", config.get(name));
+  jsonGen.writeStringField("value",
+  redactor.redact(name, config.get(name)));
   jsonGen.writeBooleanField("isFinal",
   config.finalParameters.contains(name));
   String[] resources = config.updatingResource.get(name);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e208a4e7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 24a49ad..66f717d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -48,6 +48,7 @@ import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -83,6 +84,11 @@ public class TestConfiguration extends TestCase {
   /** Four apostrophes. */
  public static final String ESCAPED = "&#39;&#39;&#39;&#39;";
 
+  private static final String SENSITIVE_CONFIG_KEYS =
+  CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
+
+  

hadoop git commit: HADOOP-14260. Configuration.dumpConfiguration should redact sensitive information. Contributed by John Zhuge.

2017-08-11 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4222c9710 -> 582648bef


HADOOP-14260. Configuration.dumpConfiguration should redact sensitive 
information. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/582648be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/582648be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/582648be

Branch: refs/heads/trunk
Commit: 582648befaf9908159f937d2cc8f549583a3483e
Parents: 4222c97
Author: John Zhuge 
Authored: Thu Aug 10 16:28:22 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 10:16:08 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 15 +++---
 .../apache/hadoop/conf/TestConfiguration.java   | 48 ++--
 2 files changed, 53 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/582648be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 65e8569..edaee68 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3146,7 +3146,8 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
   JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
   dumpGenerator.writeStartObject();
   dumpGenerator.writeFieldName("property");
-  appendJSONProperty(dumpGenerator, config, propertyName);
+  appendJSONProperty(dumpGenerator, config, propertyName,
+  new ConfigRedactor(config));
   dumpGenerator.writeEndObject();
   dumpGenerator.flush();
 }
@@ -3186,11 +3187,11 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 dumpGenerator.writeFieldName("properties");
 dumpGenerator.writeStartArray();
 dumpGenerator.flush();
+ConfigRedactor redactor = new ConfigRedactor(config);
 synchronized (config) {
   for (Map.Entry<Object,Object> item: config.getProps().entrySet()) {
-appendJSONProperty(dumpGenerator,
-config,
-item.getKey().toString());
+appendJSONProperty(dumpGenerator, config, item.getKey().toString(),
+redactor);
   }
 }
 dumpGenerator.writeEndArray();
@@ -3208,12 +3209,14 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
* @throws IOException
*/
   private static void appendJSONProperty(JsonGenerator jsonGen,
-  Configuration config, String name) throws IOException {
+  Configuration config, String name, ConfigRedactor redactor)
+  throws IOException {
 // skip writing if given property name is empty or null
 if(!Strings.isNullOrEmpty(name) && jsonGen != null) {
   jsonGen.writeStartObject();
   jsonGen.writeStringField("key", name);
-  jsonGen.writeStringField("value", config.get(name));
+  jsonGen.writeStringField("value",
+  redactor.redact(name, config.get(name)));
   jsonGen.writeBooleanField("isFinal",
   config.finalParameters.contains(name));
   String[] resources = config.updatingResource.get(name);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/582648be/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 92d3290..91f25fa 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -49,6 +49,7 @@ import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -82,6 +83,11 @@ public class TestConfiguration extends TestCase {
   /** Four apostrophes. */
  public static final String ESCAPED = "&#39;&#39;&#39;&#39;";
 
+  private static final String SENSITIVE_CONFIG_KEYS =
+  CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;

[20/50] [abbrv] hadoop git commit: YARN-6890. Not display killApp button on UI if UI is unsecured but cluster is secured. Contributed by Junping Du

2017-08-11 Thread wangda
YARN-6890. Not display killApp button on UI if UI is unsecured but cluster is 
secured. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acf9bd8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acf9bd8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acf9bd8b

Branch: refs/heads/YARN-5881
Commit: acf9bd8b1d87b9c46821ecf0461d8dcd0a6ec6d6
Parents: 47b145b
Author: Jian He 
Authored: Tue Aug 8 11:09:38 2017 -0700
Committer: Jian He 
Committed: Tue Aug 8 11:09:38 2017 -0700

--
 .../hadoop/fs/CommonConfigurationKeysPublic.java  |  2 ++
 .../apache/hadoop/yarn/server/webapp/AppBlock.java| 14 +-
 2 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf9bd8b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index e8d4b4c..4fda2b8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -608,6 +608,8 @@ public class CommonConfigurationKeysPublic {
*/
   public static final String HADOOP_TOKEN_FILES =
   "hadoop.token.files";
+  public static final String HADOOP_HTTP_AUTHENTICATION_TYPE =
+"hadoop.http.authentication.type";
 
   /**
* @see

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf9bd8b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index d4090aa..693aa04 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -30,6 +30,7 @@ import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.UserGroupInformation;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
@@ -70,6 +71,8 @@ public class AppBlock extends HtmlBlock {
   protected ApplicationBaseProtocol appBaseProt;
   protected Configuration conf;
   protected ApplicationId appID = null;
+  private boolean unsecuredUI = true;
+
 
   @Inject
   protected AppBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx,
@@ -77,6 +80,9 @@ public class AppBlock extends HtmlBlock {
 super(ctx);
 this.appBaseProt = appBaseProt;
 this.conf = conf;
+// check if UI is unsecured.
+String httpAuth = 
conf.get(CommonConfigurationKeys.HADOOP_HTTP_AUTHENTICATION_TYPE);
+this.unsecuredUI = (httpAuth != null) && httpAuth.equals("simple");
   }
 
   @Override
@@ -129,10 +135,16 @@ public class AppBlock extends HtmlBlock {
 
 setTitle(join("Application ", aid));
 
+// YARN-6890. for secured cluster allow anonymous UI access, application 
kill
+// shouldn't be there.
+boolean unsecuredUIForSecuredCluster = 
UserGroupInformation.isSecurityEnabled()
+&& this.unsecuredUI;
+
 if (webUiType != null
 && webUiType.equals(YarnWebParams.RM_WEB_UI)
 && conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
-  YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)) {
+  YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)
+&& !unsecuredUIForSecuredCluster) {
   // Application Kill
   html.div()
 .button()
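
The guard combines two observations: the cluster itself is secured
(UserGroupInformation.isSecurityEnabled()) while the web UI accepts
anonymous users (hadoop.http.authentication.type set to "simple"). Only in
that combination is the kill button suppressed, since an anonymous visitor
could otherwise kill arbitrary applications. A compressed sketch of the
decision with the two lookups stubbed out as parameters:

final class KillButtonGuard {
  /** securityEnabled/httpAuth stand in for the UGI check and conf.get(). */
  static boolean showKillButton(boolean actionsEnabled,
      boolean securityEnabled, String httpAuth) {
    boolean unsecuredUiForSecuredCluster =
        securityEnabled && "simple".equals(httpAuth);
    return actionsEnabled && !unsecuredUiForSecuredCluster;
  }
}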





[37/50] [abbrv] hadoop git commit: YARN-6033. Add support for sections in container-executor configuration file. (Varun Vasudev via wangda)

2017-08-11 Thread wangda
YARN-6033. Add support for sections in container-executor configuration file. 
(Varun Vasudev via wangda)

Change-Id: Ibc6d2a959debe5d8ff2b51504149742449d1f1da


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec694145
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec694145
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec694145

Branch: refs/heads/YARN-5881
Commit: ec694145cf9c0ade7606813871ca2a4a371def8e
Parents: 63cfcb9
Author: Wangda Tan 
Authored: Wed Aug 9 10:51:29 2017 -0700
Committer: Wangda Tan 
Committed: Wed Aug 9 10:51:29 2017 -0700

--
 .../hadoop-yarn-server-nodemanager/pom.xml  |  38 ++
 .../src/CMakeLists.txt  |  22 +
 .../container-executor/impl/configuration.c | 672 +--
 .../container-executor/impl/configuration.h | 182 +++--
 .../impl/container-executor.c   |  39 +-
 .../impl/container-executor.h   |  52 +-
 .../container-executor/impl/get_executable.c|   1 +
 .../main/native/container-executor/impl/main.c  |  17 +-
 .../main/native/container-executor/impl/util.c  | 134 
 .../main/native/container-executor/impl/util.h  | 115 
 .../test-configurations/configuration-1.cfg |  31 +
 .../test-configurations/configuration-2.cfg |  28 +
 .../test/test-configurations/old-config.cfg |  25 +
 .../test/test-container-executor.c  |  15 +-
 .../test/test_configuration.cc  | 432 
 .../native/container-executor/test/test_main.cc |  29 +
 .../native/container-executor/test/test_util.cc | 138 
 17 files changed, 1649 insertions(+), 321 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec694145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 28ee0d9..a50a769 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -215,6 +215,44 @@
   ${project.build.directory}/native-results
 
   
+  
+cetest
+cmake-test
+test
+
+  
+  cetest
+  
${project.build.directory}/native/test
+  ${basedir}/src
+  
${project.build.directory}/native/test/cetest
+  
+--gtest_filter=-Perf.
+
--gtest_output=xml:${project.build.directory}/surefire-reports/TEST-cetest.xml
+  
+  
${project.build.directory}/surefire-reports
+
+  
+
+  
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+make
+compile
+
+  run
+
+
+  
+
+  
+
+  
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec694145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 5b52536..100d7ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -19,6 +19,9 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
 list(APPEND CMAKE_MODULE_PATH 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
 
+# Set gtest path
+set(GTEST_SRC_DIR 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common/src/main/native/gtest)
+
 # determine if container-executor.conf.dir is an absolute
 # path in case the OS we're compiling on doesn't have
 # a hook in get_executable. We'll use this define
@@ -80,12 +83,20 @@ endfunction()
 include_directories(
 ${CMAKE_CURRENT_SOURCE_DIR}
 

[50/50] [abbrv] hadoop git commit: YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda)

2017-08-11 Thread wangda
YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G 
via wangda)

Change-Id: I9213f5297a6841fab5c573e85ee4c4e5f4a0b7ff


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95a81934
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95a81934
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95a81934

Branch: refs/heads/YARN-5881
Commit: 95a81934385a1a0f404930b8075e2a066fc6c413
Parents: 4222c97
Author: Wangda Tan 
Authored: Fri Aug 11 10:30:23 2017 -0700
Committer: Wangda Tan 
Committed: Fri Aug 11 10:30:23 2017 -0700

--
 .../org/apache/hadoop/util/StringUtils.java |  31 ++
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 217 
 .../resource/DefaultResourceCalculator.java |   6 +
 .../resource/DominantResourceCalculator.java|   7 +
 .../yarn/util/resource/ResourceCalculator.java  |  12 +
 .../hadoop/yarn/util/resource/Resources.java|   5 +
 .../capacity/FifoCandidatesSelector.java|   9 +-
 .../ProportionalCapacityPreemptionPolicy.java   |  10 +-
 .../monitor/capacity/TempQueuePerPartition.java |  16 +-
 .../scheduler/AbstractResourceUsage.java| 198 +++
 .../scheduler/QueueResourceQuotas.java  | 153 ++
 .../scheduler/ResourceUsage.java| 237 ++---
 .../scheduler/capacity/AbstractCSQueue.java | 162 +-
 .../scheduler/capacity/CSQueue.java |  42 +-
 .../scheduler/capacity/CSQueueUtils.java|  24 +-
 .../CapacitySchedulerConfiguration.java | 179 ++-
 .../scheduler/capacity/LeafQueue.java   |  31 +-
 .../scheduler/capacity/ParentQueue.java | 203 +++-
 .../scheduler/capacity/UsersManager.java|   5 +-
 .../PriorityUtilizationQueueOrderingPolicy.java |  11 +
 .../webapp/dao/CapacitySchedulerQueueInfo.java  |  15 +
 .../yarn/server/resourcemanager/MockNM.java |   8 +
 .../yarn/server/resourcemanager/MockRM.java |   6 +
 ...alCapacityPreemptionPolicyMockFramework.java |  13 +
 ...estProportionalCapacityPreemptionPolicy.java |  29 +-
 ...pacityPreemptionPolicyIntraQueueWithDRF.java |   6 +-
 .../TestAbsoluteResourceConfiguration.java  | 516 +++
 .../capacity/TestApplicationLimits.java |  30 +-
 .../TestApplicationLimitsByPartition.java   |   4 +
 .../capacity/TestCapacityScheduler.java |   2 +-
 .../scheduler/capacity/TestChildQueueOrder.java |   2 +
 .../scheduler/capacity/TestLeafQueue.java   | 261 --
 .../scheduler/capacity/TestParentQueue.java |   8 +
 .../scheduler/capacity/TestReservations.java|  17 +
 ...tPriorityUtilizationQueueOrderingPolicy.java |   3 +
 .../webapp/TestRMWebServicesCapacitySched.java  |   4 +-
 36 files changed, 2046 insertions(+), 436 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a81934/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index cda5ec7..1be8a08 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -1152,4 +1152,35 @@ public class StringUtils {
 return s1.equalsIgnoreCase(s2);
   }
 
+  /**
+   * Checks if the String contains only unicode letters.
+   *
+   * null will return false.
+   * An empty String (length()=0) will return true.
+   *
+   * 
+   * StringUtils.isAlpha(null)   = false
+   * StringUtils.isAlpha("") = true
+   * StringUtils.isAlpha("  ")   = false
+   * StringUtils.isAlpha("abc")  = true
+   * StringUtils.isAlpha("ab2c") = false
+   * StringUtils.isAlpha("ab-c") = false
+   * 
+   *
+   * @param str  the String to check, may be null
+   * @return true if only contains letters, and is non-null
+   */
+  public static boolean isAlpha(String str) {
+  if (str == null) {
+  return false;
+  }
+  int sz = str.length();
+  for (int i = 0; i < sz; i++) {
+  if (Character.isLetter(str.charAt(i)) == false) {
+  return false;
+  }
+  }
+  return true;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a81934/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
 

[44/50] [abbrv] hadoop git commit: MAPREDUCE-6870. Add configuration for MR job to finish when all reducers are complete. (Peter Bacsko via Haibo Chen)

2017-08-11 Thread wangda
MAPREDUCE-6870. Add configuration for MR job to finish when all reducers are 
complete. (Peter Bacsko via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a32e0138
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a32e0138
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a32e0138

Branch: refs/heads/YARN-5881
Commit: a32e0138fb63c92902e6613001f38a87c8a41321
Parents: 312e57b
Author: Haibo Chen 
Authored: Thu Aug 10 15:17:36 2017 -0700
Committer: Haibo Chen 
Committed: Thu Aug 10 15:17:36 2017 -0700

--
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  35 -
 .../mapreduce/v2/app/job/impl/TestJobImpl.java  | 139 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   6 +-
 .../src/main/resources/mapred-default.xml   |   8 ++
 4 files changed, 160 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32e0138/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 4d155d0..6880b6c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -644,6 +644,8 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   private float reduceProgress;
   private float cleanupProgress;
   private boolean isUber = false;
+  private boolean finishJobWhenReducersDone;
+  private boolean completingJob = false;
 
   private Credentials jobCredentials;
  private Token<JobTokenIdentifier> jobToken;
@@ -717,6 +719,9 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 this.maxFetchFailuresNotifications = conf.getInt(
 MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
 MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
+this.finishJobWhenReducersDone = conf.getBoolean(
+MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE,
+MRJobConfig.DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE);
   }
 
  protected StateMachine<JobStateInternal, JobEventType, JobEvent>
getStateMachine() {
@@ -2021,7 +2026,9 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 TimeUnit.MILLISECONDS);
 return JobStateInternal.FAIL_WAIT;
   }
-  
+
+  checkReadyForCompletionWhenAllReducersDone(job);
+
   return job.checkReadyForCommit();
 }
 
@@ -2052,6 +2059,32 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   }
   job.metrics.killedTask(task);
 }
+
+   /** Improvement: if all reducers have finished, we check if we have
+   restarted mappers that are still running. This can happen in a
+   situation when a node becomes UNHEALTHY and mappers are rescheduled.
+   See MAPREDUCE-6870 for details */
+private void checkReadyForCompletionWhenAllReducersDone(JobImpl job) {
+  if (job.finishJobWhenReducersDone) {
+int totalReduces = job.getTotalReduces();
+int completedReduces = job.getCompletedReduces();
+
+if (totalReduces > 0 && totalReduces == completedReduces
+&& !job.completingJob) {
+
+  for (TaskId mapTaskId : job.mapTasks) {
+MapTaskImpl task = (MapTaskImpl) job.tasks.get(mapTaskId);
+if (!task.isFinished()) {
+  LOG.info("Killing map task " + task.getID());
+  job.eventHandler.handle(
+  new TaskEvent(task.getID(), TaskEventType.T_KILL));
+}
+  }
+
+  job.completingJob = true;
+}
+  }
+}
   }
 
   // Transition class for handling jobs with no tasks
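
The transition only takes effect when the new switch is on: once
totalReduces > 0 and every reducer has completed, any still-running
(typically rescheduled) mappers receive T_KILL and the job moves on to
commit instead of waiting on map output nobody will consume. A hedged
sketch of opting a job in from submission code, using the MRJobConfig
constant introduced above (standard MapReduce client API assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class EarlyFinishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Key added by this patch; see the MRJobConfig hunk in the diffstat.
    conf.setBoolean(MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE, true);
    Job job = Job.getInstance(conf, "early-finish-example");
    // ... set input/output, mapper and reducer classes, then submit ...
  }
}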

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32e0138/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
 

[01/50] [abbrv] hadoop git commit: HADOOP-13963. /bin/bash is hard coded in some of the scripts. Contributed by Ajay Yadav.

2017-08-11 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5881 686a634f0 -> 95a819343


HADOOP-13963. /bin/bash is hard coded in some of the scripts. Contributed by 
Ajay Yadav.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6fdeb8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6fdeb8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6fdeb8a

Branch: refs/heads/YARN-5881
Commit: a6fdeb8a872d413c76257a32914ade1d0e944583
Parents: 02bf328
Author: Arpit Agarwal 
Authored: Fri Aug 4 10:40:52 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Aug 4 10:40:52 2017 -0700

--
 dev-support/docker/hadoop_env_checks.sh| 2 +-
 dev-support/findHangingTest.sh | 2 +-
 dev-support/verify-xml.sh  | 2 +-
 .../src/test/scripts/hadoop-functions_test_helper.bash | 2 +-
 start-build-env.sh | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6fdeb8a/dev-support/docker/hadoop_env_checks.sh
--
diff --git a/dev-support/docker/hadoop_env_checks.sh 
b/dev-support/docker/hadoop_env_checks.sh
index 910c802..5cb4b2b 100755
--- a/dev-support/docker/hadoop_env_checks.sh
+++ b/dev-support/docker/hadoop_env_checks.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6fdeb8a/dev-support/findHangingTest.sh
--
diff --git a/dev-support/findHangingTest.sh b/dev-support/findHangingTest.sh
index f7ebe47..fcda9ff 100644
--- a/dev-support/findHangingTest.sh
+++ b/dev-support/findHangingTest.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6fdeb8a/dev-support/verify-xml.sh
--
diff --git a/dev-support/verify-xml.sh b/dev-support/verify-xml.sh
index abab4e6..9ef456a 100755
--- a/dev-support/verify-xml.sh
+++ b/dev-support/verify-xml.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6fdeb8a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
 
b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
index 86608ed..fa34bdf 100755
--- 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
+++ 
b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6fdeb8a/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index 18e3a8c..94af7e4 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with





[09/50] [abbrv] hadoop git commit: YARN-6873. Moving logging APIs over to slf4j in hadoop-yarn-server-applicationhistoryservice. Contributed by Yeliang Cang.

2017-08-11 Thread wangda
YARN-6873. Moving logging APIs over to slf4j in 
hadoop-yarn-server-applicationhistoryservice. Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/839e077f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/839e077f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/839e077f

Branch: refs/heads/YARN-5881
Commit: 839e077faf4019d6efdcd89d95930023cd0b0a08
Parents: a4eb701
Author: Akira Ajisaka 
Authored: Mon Aug 7 18:56:00 2017 +0900
Committer: Akira Ajisaka 
Committed: Mon Aug 7 18:56:00 2017 +0900

--
 .../ApplicationHistoryClientService.java|  8 ++---
 .../ApplicationHistoryManagerImpl.java  |  8 ++---
 ...pplicationHistoryManagerOnTimelineStore.java |  8 ++---
 .../ApplicationHistoryServer.java   | 10 +++---
 .../FileSystemApplicationHistoryStore.java  | 22 ++--
 .../webapp/AHSWebServices.java  |  7 ++--
 .../webapp/NavBlock.java|  8 ++---
 .../timeline/KeyValueBasedTimelineStore.java|  8 ++---
 .../server/timeline/LeveldbTimelineStore.java   | 35 ++--
 .../yarn/server/timeline/RollingLevelDB.java| 15 +
 .../timeline/RollingLevelDBTimelineStore.java   | 22 ++--
 .../server/timeline/TimelineDataManager.java|  7 ++--
 .../recovery/LeveldbTimelineStateStore.java | 30 -
 .../timeline/security/TimelineACLsManager.java  |  7 ++--
 ...lineDelegationTokenSecretManagerService.java |  8 ++---
 .../timeline/webapp/TimelineWebServices.java|  7 ++--
 .../TestFileSystemApplicationHistoryStore.java  |  8 ++---
 .../timeline/TestLeveldbTimelineStore.java  |  2 +-
 18 files changed, 111 insertions(+), 109 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/839e077f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
index 73d5d39..7d57048 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -22,8 +22,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -61,11 +59,13 @@ import org.apache.hadoop.yarn.ipc.YarnRPC;
 import 
org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ApplicationHistoryClientService extends AbstractService implements
 ApplicationHistoryProtocol {
-  private static final Log LOG = LogFactory
-.getLog(ApplicationHistoryClientService.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ApplicationHistoryClientService.class);
   private ApplicationHistoryManager history;
   private Server server;
   private InetSocketAddress bindAddress;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/839e077f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
 

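The same mechanical substitution repeats in each file of this change. A self-contained sketch of the resulting pattern, with illustrative names only:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jPatternSketch {
  // After the migration: slf4j Logger replaces commons-logging Log/LogFactory.
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jPatternSketch.class);

  public static void main(String[] args) {
    // {} placeholders defer string formatting until the level is enabled,
    // removing the need for isDebugEnabled() guards around cheap arguments.
    LOG.info("history service bound to {} with {} handler threads",
        "0.0.0.0:10200", 10);
  }
}
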
[38/50] [abbrv] hadoop git commit: HDFS-12278. LeaseManager operations are inefficient in 2.8. Contributed by Rushabh S Shah.

2017-08-11 Thread wangda
HDFS-12278. LeaseManager operations are inefficient in 2.8. Contributed by 
Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5c02f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5c02f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5c02f95

Branch: refs/heads/YARN-5881
Commit: b5c02f95b5a2fcb8931d4a86f8192caa18009ea9
Parents: ec69414
Author: Kihwal Lee 
Authored: Wed Aug 9 16:46:05 2017 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 9 16:46:05 2017 -0500

--
 .../hadoop/hdfs/server/namenode/LeaseManager.java | 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5c02f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 6578ba9..35ec063 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -26,10 +26,11 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
-import java.util.PriorityQueue;
+import java.util.NavigableSet;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -87,11 +88,15 @@ public class LeaseManager {
   // Mapping: leaseHolder -> Lease
   private final SortedMap leases = new TreeMap<>();
   // Set of: Lease
-  private final PriorityQueue sortedLeases = new PriorityQueue<>(512,
+  private final NavigableSet sortedLeases = new TreeSet<>(
   new Comparator() {
 @Override
 public int compare(Lease o1, Lease o2) {
-  return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+  if (o1.getLastUpdate() != o2.getLastUpdate()) {
+return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+  } else {
+return o1.holder.compareTo(o2.holder);
+  }
 }
   });
   // INodeID -> Lease
@@ -528,9 +533,10 @@ public class LeaseManager {
 
 long start = monotonicNow();
 
-while(!sortedLeases.isEmpty() && sortedLeases.peek().expiredHardLimit()
-  && !isMaxLockHoldToReleaseLease(start)) {
-  Lease leaseToCheck = sortedLeases.peek();
+while(!sortedLeases.isEmpty() &&
+sortedLeases.first().expiredHardLimit()
+&& !isMaxLockHoldToReleaseLease(start)) {
+  Lease leaseToCheck = sortedLeases.first();
   LOG.info(leaseToCheck + " has expired hard limit");
 
   final List removing = new ArrayList<>();


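Two properties of the new structure carry the fix: TreeSet.remove() of an arbitrary lease is O(log n) where PriorityQueue.remove(Object) is O(n), and a TreeSet treats compare() == 0 as a duplicate, which is why the comparator gains the tie-break on holder. A standalone illustration (Lease here is a stand-in, not the Hadoop class):

import java.util.Comparator;
import java.util.NavigableSet;
import java.util.TreeSet;

public class LeaseOrderingSketch {
  static final class Lease {
    final String holder;
    final long lastUpdate;
    Lease(String holder, long lastUpdate) {
      this.holder = holder;
      this.lastUpdate = lastUpdate;
    }
  }

  public static void main(String[] args) {
    Comparator<Lease> cmp = (o1, o2) -> {
      if (o1.lastUpdate != o2.lastUpdate) {
        return Long.signum(o1.lastUpdate - o2.lastUpdate);
      }
      return o1.holder.compareTo(o2.holder);  // tie-break, as in the patch
    };
    NavigableSet<Lease> sorted = new TreeSet<>(cmp);
    sorted.add(new Lease("client-a", 100L));
    sorted.add(new Lease("client-b", 100L));  // same timestamp, kept distinct
    System.out.println(sorted.size());          // 2; without the tie-break: 1
    System.out.println(sorted.first().holder);  // client-a, by holder on the tie
  }
}
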



[17/50] [abbrv] hadoop git commit: HADOOP-14730. Support protobuf FileStatus in AdlFileSystem.

2017-08-11 Thread wangda
HADOOP-14730. Support protobuf FileStatus in AdlFileSystem.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55a181f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55a181f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55a181f8

Branch: refs/heads/YARN-5881
Commit: 55a181f845adcdcc9b008e9906ade1544fc220e4
Parents: 8d3fd81
Author: Chris Douglas 
Authored: Mon Aug 7 21:31:28 2017 -0700
Committer: Chris Douglas 
Committed: Mon Aug 7 21:31:28 2017 -0700

--
 .../org/apache/hadoop/fs/adl/AdlFileStatus.java | 69 
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 27 ++--
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 57 
 .../apache/hadoop/fs/adl/TestListStatus.java|  8 ++-
 4 files changed, 105 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55a181f8/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java
new file mode 100644
index 000..70c005d
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.fs.adl;
+
+import com.microsoft.azure.datalake.store.DirectoryEntry;
+import com.microsoft.azure.datalake.store.DirectoryEntryType;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_BLOCK_SIZE;
+import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_REPLICATION_FACTOR;
+
+/**
+ * Shim class supporting linking against 2.x clients.
+ */
+class AdlFileStatus extends FileStatus {
+
+  private static final long serialVersionUID = 0x01fcbe5e;
+
+  private boolean hasAcl = false;
+
+  AdlFileStatus(DirectoryEntry entry, Path path, boolean hasAcl) {
+this(entry, path, entry.user, entry.group, hasAcl);
+  }
+
+  AdlFileStatus(DirectoryEntry entry, Path path,
+String owner, String group, boolean hasAcl) {
+super(entry.length, DirectoryEntryType.DIRECTORY == entry.type,
+ADL_REPLICATION_FACTOR, ADL_BLOCK_SIZE,
+entry.lastModifiedTime.getTime(), entry.lastAccessTime.getTime(),
+new AdlPermission(hasAcl, Short.parseShort(entry.permission, 8)),
+owner, group, null, path);
+this.hasAcl = hasAcl;
+  }
+
+  @Override
+  public boolean hasAcl() {
+return hasAcl;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+// satisfy findbugs
+return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+// satisfy findbugs
+return super.hashCode();
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55a181f8/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 0de538e..76ce43e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -29,7 +29,6 @@ import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
-import com.microsoft.azure.datalake.store.DirectoryEntryType;
 import 

[28/50] [abbrv] hadoop git commit: HDFS-11975. Provide a system-default EC policy. Contributed by Huichun Lu

2017-08-11 Thread wangda
HDFS-11975. Provide a system-default EC policy. Contributed by Huichun Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a53b8b6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a53b8b6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a53b8b6f

Branch: refs/heads/YARN-5881
Commit: a53b8b6fdce111b1e35ad0dc563eb53d1c58462f
Parents: ad2a350
Author: Kai Zheng 
Authored: Wed Aug 9 10:12:58 2017 +0800
Committer: Kai Zheng 
Committed: Wed Aug 9 10:12:58 2017 +0800

--
 .../hadoop/hdfs/DistributedFileSystem.java  |  2 --
 .../ClientNamenodeProtocolTranslatorPB.java |  4 ++-
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +++
 ...tNamenodeProtocolServerSideTranslatorPB.java |  4 ++-
 .../namenode/ErasureCodingPolicyManager.java| 12 +--
 .../hdfs/server/namenode/NameNodeRpcServer.java | 14 +++-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 14 
 .../src/main/resources/hdfs-default.xml |  8 +
 .../src/site/markdown/HDFSErasureCoding.md  |  8 +
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 24 --
 .../server/namenode/TestEnabledECPolicies.java  | 10 +++---
 .../test/resources/testErasureCodingConf.xml| 35 
 13 files changed, 117 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a53b8b6f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 13c5eb9..cd368d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2515,8 +2515,6 @@ public class DistributedFileSystem extends FileSystem {
   public void setErasureCodingPolicy(final Path path,
   final String ecPolicyName) throws IOException {
 Path absF = fixRelativePart(path);
-Preconditions.checkNotNull(ecPolicyName, "Erasure coding policy cannot be" +
-" null.");
 new FileSystemLinkResolver() {
   @Override
   public Void doCall(final Path p) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a53b8b6f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 388788c..aed4117 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -1518,7 +1518,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
 final SetErasureCodingPolicyRequestProto.Builder builder =
 SetErasureCodingPolicyRequestProto.newBuilder();
 builder.setSrc(src);
-builder.setEcPolicyName(ecPolicyName);
+if (ecPolicyName != null) {
+  builder.setEcPolicyName(ecPolicyName);
+}
 SetErasureCodingPolicyRequestProto req = builder.build();
 try {
   rpcProxy.setErasureCodingPolicy(null, req);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a53b8b6f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 65baab6..9f80350 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -25,7 +25,7 @@ import "hdfs.proto";
 
 message SetErasureCodingPolicyRequestProto {
   required string src = 1;
-  required string ecPolicyName = 2;
+  optional string ecPolicyName = 2;
 }
 
 message SetErasureCodingPolicyResponseProto {


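The client-side effect, sketched under the assumption of a running cluster with erasure coding available: with ecPolicyName optional end to end, a null policy name now asks the NameNode for its configured default instead of tripping the removed precondition. The cluster URI and path below are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DefaultEcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs =
        FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/ec-data");
      dfs.mkdirs(dir);
      dfs.setErasureCodingPolicy(dir, null);  // null selects the system default
      System.out.println(dfs.getErasureCodingPolicy(dir));
    }
  }
}
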
[48/50] [abbrv] hadoop git commit: YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda)

2017-08-11 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a81934/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index d45f756..a74274c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -191,6 +191,8 @@ public class TestLeafQueue {
 CapacitySchedulerConfiguration.ROOT, 
 queues, queues, 
 TestUtils.spyHook);
+root.updateClusterResource(Resources.createResource(100 * 16 * GB, 100 * 32),
+new ResourceLimits(Resources.createResource(100 * 16 * GB, 100 * 32)));
 
 ResourceUsage queueResUsage = root.getQueueResourceUsage();
 when(csContext.getClusterResourceUsage())
@@ -307,13 +309,11 @@ public class TestLeafQueue {
 // Verify the value for getAMResourceLimit for queues with < .1 maxcap
 Resource clusterResource = Resource.newInstance(50 * GB, 50);
 
-a.updateClusterResource(clusterResource,
+root.updateClusterResource(clusterResource,
 new ResourceLimits(clusterResource));
 assertEquals(Resource.newInstance(1 * GB, 1),
 a.calculateAndGetAMResourceLimit());
 
-b.updateClusterResource(clusterResource,
-new ResourceLimits(clusterResource));
 assertEquals(Resource.newInstance(5 * GB, 1),
 b.calculateAndGetAMResourceLimit());
   }
@@ -358,6 +358,8 @@ public class TestLeafQueue {
 Resource clusterResource = 
 Resources.createResource(numNodes * (8*GB), numNodes * 16);
 when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
 
 // Setup resource-requests
 Priority priority = TestUtils.createMockPriority(1);
@@ -556,6 +558,8 @@ public class TestLeafQueue {
 Resource clusterResource = 
 Resources.createResource(numNodes * (8*GB), numNodes * 16);
 when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
 
 // Setup resource-requests
 Priority priority = TestUtils.createMockPriority(1);
@@ -630,6 +634,8 @@ public class TestLeafQueue {
 // Test max-capacity
 // Now - no more allocs since we are at max-cap
 a.setMaxCapacity(0.5f);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
 applyCSAssignment(clusterResource,
 a.assignContainers(clusterResource, node_0,
 new ResourceLimits(clusterResource),
@@ -699,6 +705,8 @@ public class TestLeafQueue {
 Resource clusterResource =
 Resources.createResource(numNodes * (80 * GB), numNodes * 100);
 when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
 
 // Set user-limit. Need a small queue within a large cluster.
 b.setUserLimit(50);
@@ -779,6 +787,8 @@ public class TestLeafQueue {
 Resources.createResource(numNodes * (8 * GB), numNodes * 100);
 when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 when(csContext.getClusterResource()).thenReturn(clusterResource);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
 
 // Setup resource-requests so that one application is memory dominant
 // and other application is vcores dominant
@@ -891,6 +901,8 @@ public class TestLeafQueue {
 Resource clusterResource = 
 Resources.createResource(numNodes * (8*GB), numNodes * 16);
 when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
  
 // Setup resource-requests
 Priority priority = TestUtils.createMockPriority(1);
@@ -915,6 +927,8 @@ public class TestLeafQueue {
 // Set user-limit
 a.setUserLimit(50);
 a.setUserLimitFactor(2);
+root.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
 
 // There're two active users
 assertEquals(2, 

[11/50] [abbrv] hadoop git commit: HDFS-12306. Add audit log for some erasure coding operations. Contributed by Huafeng Wang

2017-08-11 Thread wangda
HDFS-12306. Add audit log for some erasure coding operations. Contributed by 
Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b674360
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b674360
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b674360

Branch: refs/heads/YARN-5881
Commit: 0b67436068899497e99c86f37fd4887ca188fae2
Parents: b0fbf17
Author: Kai Zheng 
Authored: Mon Aug 7 19:30:10 2017 +0800
Committer: Kai Zheng 
Committed: Mon Aug 7 19:30:10 2017 +0800

--
 .../hdfs/server/namenode/FSNamesystem.java  | 48 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 2 files changed, 29 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b674360/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 229de05..b1639b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7055,18 +7055,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this,
   srcArg, ecPolicyName, pc, logRetryCache);
   success = true;
-} catch (AccessControlException ace) {
-  logAuditEvent(success, operationName, srcArg, null,
-  resultingStat);
-  throw ace;
 } finally {
   writeUnlock(operationName);
   if (success) {
 getEditLog().logSync();
   }
+  logAuditEvent(success, operationName, srcArg, null, resultingStat);
 }
-logAuditEvent(success, operationName, srcArg, null,
-resultingStat);
   }
 
   /**
@@ -7074,9 +7069,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* @param policies The policies to add.
* @return The according result of add operation.
*/
-  AddECPolicyResponse[] addECPolicies(ErasureCodingPolicy[] policies)
+  AddECPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies)
   throws IOException {
-final String operationName = "addECPolicies";
+final String operationName = "addErasureCodingPolicies";
 String addECPolicyName = "";
 checkOperation(OperationCategory.WRITE);
 List responses = new ArrayList<>();
@@ -7201,18 +7196,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   resultingStat = FSDirErasureCodingOp.unsetErasureCodingPolicy(this,
   srcArg, pc, logRetryCache);
   success = true;
-} catch (AccessControlException ace) {
-  logAuditEvent(success, operationName, srcArg, null,
-  resultingStat);
-  throw ace;
 } finally {
   writeUnlock(operationName);
   if (success) {
 getEditLog().logSync();
   }
+  logAuditEvent(success, operationName, srcArg, null, resultingStat);
 }
-logAuditEvent(success, operationName, srcArg, null,
-resultingStat);
   }
 
   /**
@@ -7220,14 +7210,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
*/
   ErasureCodingPolicy getErasureCodingPolicy(String src)
   throws AccessControlException, UnresolvedLinkException, IOException {
+final String operationName = "getErasureCodingPolicy";
+boolean success = false;
 checkOperation(OperationCategory.READ);
 FSPermissionChecker pc = getPermissionChecker();
 readLock();
 try {
   checkOperation(OperationCategory.READ);
-  return FSDirErasureCodingOp.getErasureCodingPolicy(this, src, pc);
+  final ErasureCodingPolicy ret =
+  FSDirErasureCodingOp.getErasureCodingPolicy(this, src, pc);
+  success = true;
+  return ret;
 } finally {
-  readUnlock("getErasureCodingPolicy");
+  readUnlock(operationName);
+  logAuditEvent(success, operationName, null);
 }
   }
 
@@ -7235,13 +7231,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* Get available erasure coding polices
*/
   ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+final String operationName = "getErasureCodingPolicies";
+boolean success = false;
 checkOperation(OperationCategory.READ);
 readLock();
 try {
   checkOperation(OperationCategory.READ);
-  return 

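The shape of the change, distilled into an illustration (not the FSNamesystem code): the audit call moves into finally, so exactly one audit line is emitted on every exit path rather than one per caught exception type plus another after the block.

public class AuditInFinallySketch {
  static void logAuditEvent(boolean allowed, String op, String src) {
    System.out.println("audit: op=" + op + " src=" + src + " allowed=" + allowed);
  }

  static void setPolicy(String src) {
    final String operationName = "setErasureCodingPolicy";
    boolean success = false;
    try {
      // ... take the lock and apply the change; may throw ...
      success = true;
    } finally {
      // Single audit point, reached on success and on every exception path.
      logAuditEvent(success, operationName, src);
    }
  }

  public static void main(String[] args) {
    setPolicy("/ec-data");
  }
}
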
[04/50] [abbrv] hadoop git commit: YARN-6811. [ATS1.5] All history logs should be kept under its own User Directory. Contributed by Rohith Sharma K S.

2017-08-11 Thread wangda
YARN-6811. [ATS1.5] All history logs should be kept under its own User 
Directory. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f44b349b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f44b349b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f44b349b

Branch: refs/heads/YARN-5881
Commit: f44b349b813508f0f6d99ca10bddba683dedf6c4
Parents: bbc6d25
Author: Junping Du 
Authored: Fri Aug 4 16:03:56 2017 -0700
Committer: Junping Du 
Committed: Fri Aug 4 16:03:56 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  4 +
 .../api/impl/FileSystemTimelineWriter.java  | 40 ++--
 .../src/main/resources/yarn-default.xml | 10 ++
 .../api/impl/TestTimelineClientForATS1_5.java   | 81 
 .../timeline/EntityGroupFSTimelineStore.java| 23 -
 .../TestEntityGroupFSTimelineStore.java | 99 ++--
 6 files changed, 224 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f44b349b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d608df8..71a7134 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2069,6 +2069,10 @@ public class YarnConfiguration extends Configuration {
   = TIMELINE_SERVICE_PREFIX
   + "entity-file.fs-support-append";
 
+  public static final String
+  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR =
+  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "with-user-dir";
+
   /**
* Settings for timeline service v2.0
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f44b349b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index fc3385b..b7bb48e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -145,9 +145,12 @@ public class FileSystemTimelineWriter extends 
TimelineWriter{
 new LogFDsCache(flushIntervalSecs, cleanIntervalSecs, ttl,
 timerTaskTTL);
 
-this.isAppendSupported =
-conf.getBoolean(
-YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND, true);
+this.isAppendSupported = conf.getBoolean(
+YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND, true);
+
+boolean storeInsideUserDir = conf.getBoolean(
+YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR,
+false);
 
 objMapper = createObjectMapper();
 
@@ -157,8 +160,8 @@ public class FileSystemTimelineWriter extends 
TimelineWriter{
 YarnConfiguration
 .DEFAULT_TIMELINE_SERVICE_CLIENT_INTERNAL_ATTEMPT_DIR_CACHE_SIZE);
 
-attemptDirCache =
-new AttemptDirCache(attemptDirCacheSize, fs, activePath);
+attemptDirCache = new AttemptDirCache(attemptDirCacheSize, fs, activePath,
+authUgi, storeInsideUserDir);
 
 if (LOG.isDebugEnabled()) {
   StringBuilder debugMSG = new StringBuilder();
@@ -171,6 +174,8 @@ public class FileSystemTimelineWriter extends 
TimelineWriter{
   + "=" + ttl + ", " +
   YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND
   + "=" + isAppendSupported + ", " +
+  YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR
+  + "=" + storeInsideUserDir + ", " +
   YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR
   + "=" + activePath);
 
@@ -946,8 +951,11 @@ public class FileSystemTimelineWriter extends 
TimelineWriter{
 private final Map 

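A sketch of opting in to the per-user directory layout. The literal key is an assumed expansion of TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR (the store prefix plus "with-user-dir", default false per the diff); verify it against YarnConfiguration before relying on it.

import org.apache.hadoop.conf.Configuration;

public class AtsUserDirSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed key; the patch defaults this to false to preserve the old layout.
    conf.setBoolean(
        "yarn.timeline-service.entity-group-fs-store.with-user-dir", true);
    System.out.println(conf.get(
        "yarn.timeline-service.entity-group-fs-store.with-user-dir"));
  }
}
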
[19/50] [abbrv] hadoop git commit: YARN-6757. Refactor the usage of yarn.nodemanager.linux-container-executor.cgroups.mount-path (Contributed by Miklos Szegedi via Daniel Templeton)

2017-08-11 Thread wangda
YARN-6757. Refactor the usage of 
yarn.nodemanager.linux-container-executor.cgroups.mount-path
(Contributed by Miklos Szegedi via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47b145b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47b145b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47b145b9

Branch: refs/heads/YARN-5881
Commit: 47b145b9b4e81d781891abce8a6638f0b436acc4
Parents: 9891295
Author: Daniel Templeton 
Authored: Tue Aug 8 10:33:26 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Aug 8 10:33:26 2017 -0700

--
 .../src/main/resources/yarn-default.xml | 43 ++-
 .../linux/resources/CGroupsHandler.java | 15 +
 .../linux/resources/CGroupsHandlerImpl.java | 26 +
 .../linux/resources/ResourceHandlerModule.java  | 58 ++--
 .../util/CgroupsLCEResourcesHandler.java| 53 --
 .../linux/resources/TestCGroupsHandlerImpl.java | 27 -
 .../util/TestCgroupsLCEResourcesHandler.java| 31 +++
 .../src/site/markdown/GracefulDecommission.md   | 12 ++--
 .../src/site/markdown/NodeManagerCgroups.md | 17 +-
 .../site/markdown/WritingYarnApplications.md|  4 +-
 .../src/site/markdown/registry/yarn-registry.md | 14 ++---
 11 files changed, 237 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47b145b9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 95b8a88..000e892 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -134,7 +134,7 @@
 
   
   
-This configures the HTTP endpoint for Yarn Daemons.The following
+This configures the HTTP endpoint for YARN Daemons.The following
 values are supported:
 - HTTP_ONLY : Service is provided only on http
 - HTTPS_ONLY : Service is provided only on https
@@ -1063,14 +1063,14 @@
   DeletionService will delete the application's localized file directory
   and log directory.
   
-  To diagnose Yarn application problems, set this property's value large
+  To diagnose YARN application problems, set this property's value large
   enough (for example, to 600 = 10 minutes) to permit examination of these
   directories. After changing the property's value, you must restart the 
   nodemanager in order for it to have an effect.
 
-  The roots of Yarn applications' work directories is configurable with
+  The roots of YARN applications' work directories is configurable with
   the yarn.nodemanager.local-dirs property (see below), and the roots
-  of the Yarn applications' log directories is configurable with the 
+  of the YARN applications' log directories is configurable with the
   yarn.nodemanager.log-dirs property (see also below).
 
 yarn.nodemanager.delete.debug-delay-sec
@@ -1510,28 +1510,45 @@
   
    The cgroups hierarchy under which to place YARN processes (cannot contain commas).
 If yarn.nodemanager.linux-container-executor.cgroups.mount is false
-(that is, if cgroups have been pre-configured) and the Yarn user has write
+(that is, if cgroups have been pre-configured) and the YARN user has write
 access to the parent directory, then the directory will be created.
-If the directory already exists, the administrator has to give Yarn
+If the directory already exists, the administrator has to give YARN
 write permissions to it recursively.
-Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.
+This property only applies when the LCE resources handler is set to
+CgroupsLCEResourcesHandler.
 yarn.nodemanager.linux-container-executor.cgroups.hierarchy
 /hadoop-yarn
   
 
   
 Whether the LCE should attempt to mount cgroups if not found.
-Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.
+This property only applies when the LCE resources handler is set to
+CgroupsLCEResourcesHandler.
+
 yarn.nodemanager.linux-container-executor.cgroups.mount
 false
   
 
   
-Where the LCE should attempt to mount cgroups if not found. Common locations
-include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux
-distribution in use. 

[12/50] [abbrv] hadoop git commit: HADOOP-14727. Socket not closed properly when reading Configurations with BlockReaderRemote. Contributed by Jonathan Eagles.

2017-08-11 Thread wangda
HADOOP-14727. Socket not closed properly when reading Configurations with 
BlockReaderRemote. Contributed by Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3a9c976
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3a9c976
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3a9c976

Branch: refs/heads/YARN-5881
Commit: a3a9c976c3cfa3ab6b0936eb8cf0889891bd0678
Parents: 0b67436
Author: Xiao Chen 
Authored: Fri Aug 4 20:53:45 2017 -0700
Committer: Xiao Chen 
Committed: Mon Aug 7 10:25:52 2017 -0700

--
 .../java/org/apache/hadoop/conf/Configuration.java   | 15 ++-
 .../org/apache/hadoop/conf/TestConfiguration.java|  6 --
 2 files changed, 14 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a9c976/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index e26d3a8..65e8569 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.conf;
 
+import com.ctc.wstx.io.StreamBootstrapper;
+import com.ctc.wstx.io.SystemId;
 import com.ctc.wstx.stax.WstxInputFactory;
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
@@ -94,7 +96,6 @@ import 
org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.stax2.XMLInputFactory2;
 import org.codehaus.stax2.XMLStreamReader2;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -285,7 +286,8 @@ public class Configuration implements 
Iterable>,
* Specify exact input factory to avoid time finding correct one.
* Factory is reusable across un-synchronized threads once initialized
*/
-  private static final XMLInputFactory2 XML_INPUT_FACTORY = new WstxInputFactory();
+  private static final WstxInputFactory XML_INPUT_FACTORY =
+  new WstxInputFactory();
 
   /**
* Class to keep the information about the keys which replace the deprecated
@@ -2647,15 +2649,18 @@ public class Configuration implements 
Iterable>,
 return parse(connection.getInputStream(), url.toString());
   }
 
-  private XMLStreamReader parse(InputStream is,
-  String systemId) throws IOException, XMLStreamException {
+  private XMLStreamReader parse(InputStream is, String systemIdStr)
+  throws IOException, XMLStreamException {
 if (!quietmode) {
   LOG.debug("parsing input stream " + is);
 }
 if (is == null) {
   return null;
 }
-return XML_INPUT_FACTORY.createXMLStreamReader(systemId, is);
+SystemId systemId = SystemId.construct(systemIdStr);
+return XML_INPUT_FACTORY.createSR(XML_INPUT_FACTORY.createPrivateConfig(),
+systemId, StreamBootstrapper.getInstance(null, systemId, is), false,
+true);
   }
 
   private void loadResources(Properties properties,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a9c976/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 2af61c0..92d3290 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -155,11 +155,13 @@ public class TestConfiguration extends TestCase {
 startConfig();
 declareProperty("prop", "A", "A");
 endConfig();
-
-InputStream in1 = new ByteArrayInputStream(writer.toString().getBytes());
+
+InputStream in1 = Mockito.spy(new ByteArrayInputStream(
+  writer.toString().getBytes()));
 Configuration conf = new Configuration(false);
 conf.addResource(in1);
 assertEquals("A", conf.get("prop"));
+Mockito.verify(in1, Mockito.times(1)).close();
 InputStream in2 = new ByteArrayInputStream(writer.toString().getBytes());
 conf.addResource(in2);
 

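The test above captures the contract with a Mockito spy; the same check, sketched without Mockito (the close-on-parse behavior after the fix is assumed):

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;

public class StreamClosedSketch {
  public static void main(String[] args) {
    String xml = "<configuration><property>"
        + "<name>prop</name><value>A</value></property></configuration>";
    AtomicBoolean closed = new AtomicBoolean(false);
    InputStream in = new ByteArrayInputStream(xml.getBytes()) {
      @Override
      public void close() {
        closed.set(true);
      }
    };
    Configuration conf = new Configuration(false);
    conf.addResource(in);
    System.out.println(conf.get("prop"));  // forces the lazy parse; prints A
    System.out.println(closed.get());      // true once the stream is closed
  }
}
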
[23/50] [abbrv] hadoop git commit: YARN-6879. TestLeafQueue.testDRFUserLimits() has commented out code (Contributed by Angela Wang via Daniel Templeton)

2017-08-11 Thread wangda
YARN-6879. TestLeafQueue.testDRFUserLimits() has commented out code
(Contributed by Angela Wang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0c24145
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0c24145
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0c24145

Branch: refs/heads/YARN-5881
Commit: e0c24145d2c2a7d2cf10864fb4800cb1dcbc2977
Parents: 1794de3
Author: Daniel Templeton 
Authored: Tue Aug 8 13:35:22 2017 -0700
Committer: Daniel Templeton 
Committed: Tue Aug 8 13:35:22 2017 -0700

--
 .../server/resourcemanager/scheduler/capacity/TestLeafQueue.java   | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c24145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 2864d7f..d45f756 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -820,8 +820,6 @@ public class TestLeafQueue {
   applyCSAssignment(clusterResource, assign, b, nodes, apps);
 } while (assign.getResource().getMemorySize() > 0 &&
 assign.getAssignmentInformation().getNumReservations() == 0);
-//LOG.info("user_0: " + queueUser0.getUsed());
-//LOG.info("user_1: " + queueUser1.getUsed());
 
 assertTrue("Verify user_0 got resources ", queueUser0.getUsed()
 .getMemorySize() > 0);





[26/50] [abbrv] hadoop git commit: HADOOP-14598. Blacklist Http/HttpsFileSystem in FsUrlStreamHandlerFactory. Contributed by Steve Loughran.

2017-08-11 Thread wangda
HADOOP-14598. Blacklist Http/HttpsFileSystem in FsUrlStreamHandlerFactory. 
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1db4788b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1db4788b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1db4788b

Branch: refs/heads/YARN-5881
Commit: 1db4788b7d22e57f91520e4a6971774ef84ffab9
Parents: f4e1aa0
Author: Haohui Mai 
Authored: Tue Aug 8 16:27:23 2017 -0700
Committer: Haohui Mai 
Committed: Tue Aug 8 16:33:18 2017 -0700

--
 .../org/apache/hadoop/fs/FsUrlConnection.java   | 10 
 .../hadoop/fs/FsUrlStreamHandlerFactory.java| 26 ++-
 .../apache/hadoop/fs/TestUrlStreamHandler.java  | 48 +++-
 3 files changed, 72 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1db4788b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
index 90e75b0..03c7aed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
@@ -23,6 +23,10 @@ import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLConnection;
 
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +37,8 @@ import org.apache.hadoop.conf.Configuration;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 class FsUrlConnection extends URLConnection {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FsUrlConnection.class);
 
   private Configuration conf;
 
@@ -40,12 +46,16 @@ class FsUrlConnection extends URLConnection {
 
   FsUrlConnection(Configuration conf, URL url) {
 super(url);
+Preconditions.checkArgument(conf != null, "null conf argument");
+Preconditions.checkArgument(url != null, "null url argument");
 this.conf = conf;
   }
 
   @Override
   public void connect() throws IOException {
+Preconditions.checkState(is == null, "Already connected");
 try {
+  LOG.debug("Connecting to {}", url);
   FileSystem fs = FileSystem.get(url.toURI(), conf);
   is = fs.open(new Path(url.getPath()));
 } catch (URISyntaxException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1db4788b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
index 91a527d..751b955 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
@@ -22,6 +22,9 @@ import java.net.URLStreamHandlerFactory;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -41,6 +44,18 @@ import org.apache.hadoop.conf.Configuration;
 public class FsUrlStreamHandlerFactory implements
 URLStreamHandlerFactory {
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FsUrlStreamHandlerFactory.class);
+
+  /**
+   * These are the protocols which MUST NOT be exported, as doing so
+   * would conflict with the standard URL handlers registered by
+   * the JVM. Many things will break.
+   */
+  public static final String[] UNEXPORTED_PROTOCOLS = {
+  "http", "https"
+  };
+
   // The configuration holds supported FS implementation class names.
   private Configuration conf;
 
@@ -64,14 +79,20 @@ public class FsUrlStreamHandlerFactory implements
   throw new RuntimeException(io);
 }
 this.handler = new FsUrlStreamHandler(this.conf);
+for (String protocol : UNEXPORTED_PROTOCOLS) {
+  protocols.put(protocol, false);
+}
   }
 
   @Override
   public 

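What the blacklist buys, sketched: for the pre-registered protocols the factory declines to hand out a handler, so the JVM's built-in http/https handlers remain in charge and ordinary URLs keep working after the factory is installed.

import java.net.URL;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

public class UnexportedProtocolsSketch {
  public static void main(String[] args) throws Exception {
    // The JVM allows this call at most once per process.
    URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    // http falls through to the default handler because the factory
    // returns null for protocols it has marked as not exported.
    URL url = new URL("http://example.org/");
    System.out.println(url.getProtocol() + " uses the JVM default handler");
  }
}
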
[07/50] [abbrv] hadoop git commit: YARN-6951. Fix debug log when Resource Handler chain is enabled. Contributed by Yang Wang.

2017-08-11 Thread wangda
YARN-6951. Fix debug log when Resource Handler chain is enabled. Contributed by 
Yang Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46b7054f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46b7054f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46b7054f

Branch: refs/heads/YARN-5881
Commit: 46b7054fa7eae9129c21c9f3dc70acff46bfdc41
Parents: d91b7a8
Author: Sunil G 
Authored: Mon Aug 7 13:15:46 2017 +0530
Committer: Sunil G 
Committed: Mon Aug 7 13:15:46 2017 +0530

--
 .../hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46b7054f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 2aaa835..b3e13b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -307,7 +307,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   .getConfiguredResourceHandlerChain(conf);
   if (LOG.isDebugEnabled()) {
 LOG.debug("Resource handler chain enabled = " + (resourceHandlerChain
-== null));
+!= null));
   }
   if (resourceHandlerChain != null) {
 LOG.debug("Bootstrapping resource handler chain");





[27/50] [abbrv] hadoop git commit: YARN-6970. Add PoolInitializationException as retriable exception in FederationFacade. (Giovanni Matteo Fumarola via Subru).

2017-08-11 Thread wangda
YARN-6970. Add PoolInitializationException as retriable exception in 
FederationFacade. (Giovanni Matteo Fumarola via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad2a3506
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad2a3506
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad2a3506

Branch: refs/heads/YARN-5881
Commit: ad2a3506626728a6be47af0db3ca60610a568734
Parents: 1db4788
Author: Subru Krishnan 
Authored: Tue Aug 8 16:48:29 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Aug 8 16:48:29 2017 -0700

--
 .../utils/FederationStateStoreFacade.java   |  2 ++
 .../TestFederationStateStoreFacadeRetry.java| 24 
 2 files changed, 26 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad2a3506/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index 389c769..682eb14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -70,6 +70,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException;
 
 /**
  *
@@ -162,6 +163,7 @@ public final class FederationStateStoreFacade {
 exceptionToPolicyMap.put(FederationStateStoreRetriableException.class,
 basePolicy);
 exceptionToPolicyMap.put(CacheLoaderException.class, basePolicy);
+exceptionToPolicyMap.put(PoolInitializationException.class, basePolicy);
 
 RetryPolicy retryPolicy = RetryPolicies.retryByException(
 RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad2a3506/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
index 304910e..ea43268 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
@@ -30,6 +30,8 @@ import 
org.apache.hadoop.yarn.server.federation.store.exception.FederationStateS
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException;
+
 /**
  * Test class to validate FederationStateStoreFacade retry policy.
  */
@@ -119,4 +121,26 @@ public class TestFederationStateStoreFacadeRetry {
 policy.shouldRetry(new CacheLoaderException(""), maxRetries, 0, false);
 Assert.assertEquals(RetryAction.FAIL.action, action.action);
   }
+
+  /*
+   * Test to validate that PoolInitializationException is a retriable exception.
+   */
+  @Test
+  public void testFacadePoolInitRetriableException() throws Exception {
+// PoolInitializationException is a retriable exception
+conf = new Configuration();
+conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
+RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
+RetryAction action = policy.shouldRetry(
+new PoolInitializationException(new YarnException()), 0, 0, false);
+// We compare only the action, delay and the reason are random value
+// during this test
+Assert.assertEquals(RetryAction.RETRY.action, 

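The wiring under test, reduced to a standalone sketch. IllegalStateException stands in for HikariPool.PoolInitializationException so the example carries no Hikari dependency; the policy values are illustrative.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetriableExceptionSketch {
  public static void main(String[] args) throws Exception {
    RetryPolicy basePolicy =
        RetryPolicies.exponentialBackoffRetry(3, 100, TimeUnit.MILLISECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
        new HashMap<>();
    exceptionToPolicyMap.put(IllegalStateException.class, basePolicy);
    RetryPolicy policy = RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    // Mapped exception type -> RETRY; anything unmapped -> FAIL.
    System.out.println(
        policy.shouldRetry(new IllegalStateException(), 0, 0, false).action);
  }
}
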
[21/50] [abbrv] hadoop git commit: MAPREDUCE-6927. MR job should only set tracking url if history was successfully written. Contributed by Eric Badger

2017-08-11 Thread wangda
MAPREDUCE-6927. MR job should only set tracking url if history was successfully 
written. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/735fce5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/735fce5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/735fce5b

Branch: refs/heads/YARN-5881
Commit: 735fce5bec17f4e1799daf922625c475cf588114
Parents: acf9bd8
Author: Jason Lowe 
Authored: Tue Aug 8 14:46:47 2017 -0500
Committer: Jason Lowe 
Committed: Tue Aug 8 14:46:47 2017 -0500

--
 .../jobhistory/JobHistoryEventHandler.java  |  27 +++--
 .../hadoop/mapreduce/v2/app/AppContext.java |   4 +
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  11 ++
 .../mapreduce/v2/app/rm/RMCommunicator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  | 102 +++
 .../hadoop/mapreduce/v2/app/MockAppContext.java |  10 ++
 .../mapreduce/v2/app/TestRuntimeEstimators.java |  10 ++
 .../hadoop/mapreduce/v2/hs/JobHistory.java  |  10 ++
 8 files changed, 168 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/735fce5b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 285d36e..53fe055 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
@@ -1404,7 +1405,12 @@ public class JobHistoryEventHandler extends AbstractService
 qualifiedDoneFile =
 doneDirFS.makeQualified(new Path(doneDirPrefixPath,
 doneJobHistoryFileName));
-moveToDoneNow(qualifiedLogFile, qualifiedDoneFile);
+if(moveToDoneNow(qualifiedLogFile, qualifiedDoneFile)) {
+  String historyUrl = MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(
+  getConfig(), context.getApplicationID());
+  context.setHistoryUrl(historyUrl);
+  LOG.info("Set historyUrl to " + historyUrl);
+}
   }
 
   // Move confFile to Done Folder
@@ -1610,7 +1616,7 @@ public class JobHistoryEventHandler extends AbstractService
 }
   }
 
-  private void moveTmpToDone(Path tmpPath) throws IOException {
+  protected void moveTmpToDone(Path tmpPath) throws IOException {
 if (tmpPath != null) {
   String tmpFileName = tmpPath.getName();
   String fileName = getFileNameFromTmpFN(tmpFileName);
@@ -1622,7 +1628,9 @@ public class JobHistoryEventHandler extends AbstractService
   
   // TODO If the FS objects are the same, this should be a rename instead of a
   // copy.
-  private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
+  protected boolean moveToDoneNow(Path fromPath, Path toPath)
+  throws IOException {
+boolean success = false;
 // check if path exists, in case of retries it may not exist
 if (stagingDirFS.exists(fromPath)) {
   LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString());
@@ -1631,13 +1639,18 @@ public class JobHistoryEventHandler extends AbstractService
   boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
   false, getConfig());
 
-  if (copied)
-LOG.info("Copied to done location: " + toPath);
-  else 
-LOG.info("copy failed");
   doneDirFS.setPermission(toPath, new FsPermission(
   JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS));
+  if (copied) {
+LOG.info("Copied from: " + fromPath.toString()
++ " to done location: " + toPath.toString());
+success = true;
+  } 
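
The essence of the change is that `moveToDoneNow` now reports whether the history file actually reached the done directory, and the caller publishes the tracking URL only in that case. A minimal standalone sketch of the same pattern; the names (`HistoryMoveSketch`, `JobContext`, the URL) are illustrative, not from the patch:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class HistoryMoveSketch {
  /** @return true only if the file was present and was actually copied. */
  static boolean moveToDone(Path from, Path to) throws IOException {
    if (!Files.exists(from)) {
      // On a retry the file may already have been moved; not an error,
      // but also not a reason to publish the history URL.
      return false;
    }
    Files.copy(from, to, StandardCopyOption.REPLACE_EXISTING);
    return true;
  }

  static void finishJob(Path log, Path done, JobContext ctx)
      throws IOException {
    if (moveToDone(log, done)) {
      ctx.setHistoryUrl("https://jhs.example.com/jobhistory"); // placeholder
    }
  }

  interface JobContext {
    void setHistoryUrl(String url);
  }
}
```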

[42/50] [abbrv] hadoop git commit: HADOOP-14743. CompositeGroupsMapping should not swallow exceptions. Contributed by Wei-Chiu Chuang.

2017-08-11 Thread wangda
HADOOP-14743. CompositeGroupsMapping should not swallow exceptions. Contributed 
by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8b75466
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8b75466
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8b75466

Branch: refs/heads/YARN-5881
Commit: a8b75466b21edfe8b12beb4420492817f0e03147
Parents: 54356b1
Author: Wei-Chiu Chuang 
Authored: Thu Aug 10 09:35:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 10 09:35:27 2017 -0700

--
 .../java/org/apache/hadoop/security/CompositeGroupsMapping.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8b75466/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
index b8cfdf7..b762df2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
@@ -74,7 +74,9 @@ public class CompositeGroupsMapping
   try {
 groups = provider.getGroups(user);
   } catch (Exception e) {
-//LOG.warn("Exception trying to get groups for user " + user, e);  
+LOG.warn("Unable to get groups for user {} via {} because: {}",
+user, provider.getClass().getSimpleName(), e.toString());
+LOG.debug("Stacktrace: ", e);
   }
   if (groups != null && ! groups.isEmpty()) {
 groupSet.addAll(groups);
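
The idiom adopted here is worth spelling out: summarize the failure at WARN using SLF4J placeholders, and keep the full stack trace at DEBUG, so one failing provider is visible without flooding the log. A hedged sketch, with an assumed `GroupLookupSketch` class standing in for the real mapping service:

```java
import java.util.concurrent.Callable;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GroupLookupSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GroupLookupSketch.class);

  void tryProvider(String user, Callable<?> provider) {
    try {
      provider.call();
    } catch (Exception e) {
      // One WARN line for operators; e.toString() keeps it to a summary.
      LOG.warn("Unable to get groups for user {} because: {}",
          user, e.toString());
      // Full stack trace only when DEBUG is enabled.
      LOG.debug("Stacktrace: ", e);
    }
  }
}
```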





[35/50] [abbrv] hadoop git commit: YARN-6958. Moving logging APIs over to slf4j in hadoop-yarn-server-timelineservice. Contributed by Yeliang Cang.

2017-08-11 Thread wangda
YARN-6958. Moving logging APIs over to slf4j in 
hadoop-yarn-server-timelineservice. Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63cfcb90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63cfcb90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63cfcb90

Branch: refs/heads/YARN-5881
Commit: 63cfcb90ac6fbb79ba9ed6b3044cd999fc74e58c
Parents: 69afa26
Author: Akira Ajisaka 
Authored: Wed Aug 9 23:58:22 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 9 23:58:22 2017 +0900

--
 .../server/timeline/LevelDBCacheTimelineStore.java| 14 +++---
 .../reader/filter/TimelineFilterUtils.java|  7 ---
 .../storage/HBaseTimelineReaderImpl.java  |  8 
 .../storage/HBaseTimelineWriterImpl.java  |  8 
 .../storage/TimelineSchemaCreator.java|  7 ---
 .../storage/application/ApplicationTable.java |  7 ---
 .../storage/apptoflow/AppToFlowTable.java |  7 ---
 .../timelineservice/storage/common/ColumnHelper.java  |  8 +---
 .../storage/common/HBaseTimelineStorageUtils.java |  8 
 .../timelineservice/storage/entity/EntityTable.java   |  7 ---
 .../storage/flow/FlowActivityTable.java   |  7 ---
 .../storage/flow/FlowRunCoprocessor.java  |  7 ---
 .../timelineservice/storage/flow/FlowRunTable.java|  7 ---
 .../timelineservice/storage/flow/FlowScanner.java |  7 ---
 .../storage/reader/TimelineEntityReader.java  |  7 ---
 .../collector/AppLevelTimelineCollector.java  |  7 ---
 .../collector/NodeTimelineCollectorManager.java   |  8 
 .../PerNodeTimelineCollectorsAuxService.java  | 10 +-
 .../timelineservice/collector/TimelineCollector.java  |  7 ---
 .../collector/TimelineCollectorManager.java   |  8 
 .../collector/TimelineCollectorWebService.java|  8 
 .../timelineservice/reader/TimelineReaderServer.java  |  9 +
 .../reader/TimelineReaderWebServices.java |  8 
 .../storage/FileSystemTimelineReaderImpl.java |  8 
 .../storage/common/TimelineStorageUtils.java  |  4 
 25 files changed, 102 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63cfcb90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
index 7379dd6..f7a3d01 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -34,6 +32,8 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBIterator;
 import org.iq80.leveldb.Options;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -58,8 +58,8 @@ import java.util.Map;
 @Private
 @Unstable
 public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore {
-  private static final Log LOG
-  = LogFactory.getLog(LevelDBCacheTimelineStore.class);
+  private static final Logger LOG
+  = LoggerFactory.getLogger(LevelDBCacheTimelineStore.class);
   private static final String CACHED_LDB_FILE_PREFIX = "-timeline-cache.ldb";
   private String dbId;
   private DB entityDb;
@@ -102,7 +102,7 @@ public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore {
 localFS.setPermission(dbPath, LeveldbUtils.LEVELDB_DIR_UMASK);
   }
 } finally {
-  IOUtils.cleanup(LOG, 
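
The same mechanical swap repeats across every file in the diffstat above. A minimal before/after sketch, assuming a class with a single debug statement:

```java
// Before (commons-logging), as removed by this series:
//   private static final Log LOG =
//       LogFactory.getLog(LevelDBCacheTimelineStore.class);
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class Slf4jMigrationSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jMigrationSketch.class);

  void example(String dbPath) {
    // {} placeholders are formatted lazily, so explicit
    // LOG.isDebugEnabled() guards around cheap arguments can be dropped.
    LOG.debug("Using leveldb cache at {}", dbPath);
  }
}
```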

[22/50] [abbrv] hadoop git commit: YARN-6726. Fix issues with docker commands executed by container-executor. (Shane Kumpf via wangda)

2017-08-11 Thread wangda
YARN-6726. Fix issues with docker commands executed by container-executor. 
(Shane Kumpf via wangda)

Change-Id: If1b1827345f98f0a49cc7e39d1ba41fbeed5e911


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1794de3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1794de3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1794de3e

Branch: refs/heads/YARN-5881
Commit: 1794de3ea4bbd6863fb43dbae9f5a46b6e4230a0
Parents: 735fce5
Author: Wangda Tan 
Authored: Tue Aug 8 12:56:29 2017 -0700
Committer: Wangda Tan 
Committed: Tue Aug 8 12:56:29 2017 -0700

--
 .../src/CMakeLists.txt  |   1 +
 .../impl/container-executor.c   |  78 +++-
 .../impl/container-executor.h   |  17 ++-
 .../impl/utils/string-utils.c   |  86 ++
 .../impl/utils/string-utils.h   |  32 +
 .../test/test-container-executor.c  | 119 ++-
 6 files changed, 327 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1794de3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index f7fe83d..5b52536 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -89,6 +89,7 @@ add_library(container
 main/native/container-executor/impl/configuration.c
 main/native/container-executor/impl/container-executor.c
 main/native/container-executor/impl/get_executable.c
+main/native/container-executor/impl/utils/string-utils.c
 )
 
 add_executable(container-executor

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1794de3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 99f7b56..def628e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -18,6 +18,7 @@
 
 #include "configuration.h"
 #include "container-executor.h"
+#include "utils/string-utils.h"
 
 #include 
 #include 
@@ -40,6 +41,7 @@
 #include 
 #include 
 #include 
+#include <regex.h>
 
 #include "config.h"
 
@@ -79,6 +81,11 @@ static const char* TC_READ_STATS_OPTS [] = { "-s",  "-b", NULL};
 //struct to store the user details
 struct passwd *user_detail = NULL;
 
+//Docker container related constants.
+static const char* DOCKER_CONTAINER_NAME_PREFIX = "container_";
+static const char* DOCKER_CLIENT_CONFIG_ARG = "--config=";
+static const char* DOCKER_PULL_COMMAND = "pull";
+
 FILE* LOGFILE = NULL;
 FILE* ERRORFILE = NULL;
 
@@ -1208,6 +1215,27 @@ char** tokenize_docker_command(const char *input, int *split_counter) {
   return linesplit;
 }
 
+int execute_regex_match(const char *regex_str, const char *input) {
+  regex_t regex;
+  int regex_match;
+if (0 != regcomp(&regex, regex_str, REG_EXTENDED|REG_NOSUB)) {
+fprintf(LOGFILE, "Unable to compile regex.");
+fflush(LOGFILE);
+exit(ERROR_COMPILING_REGEX);
+  }
+  regex_match = regexec(&regex, input, (size_t) 0, NULL, 0);
+  regfree(&regex);
+  if(0 == regex_match) {
+return 0;
+  }
+  return 1;
+}
+
+int validate_docker_image_name(const char *image_name) {
+  char *regex_str = "^(([a-zA-Z0-9.-]+)(:[0-9]+)?/)?([a-z0-9_./-]+)(:[a-zA-Z0-9_.-]+)?$";
+  return execute_regex_match(regex_str, image_name);
+}
+
 char* sanitize_docker_command(const char *line) {
   static struct option long_options[] = {
 {"name", required_argument, 0, 'n' },
@@ -1222,6 +1250,7 @@ char* sanitize_docker_command(const char *line) {
 {"cap-drop", required_argument, 0, 'o' },
 {"device", required_argument, 0, 'i' },
 {"detach", required_argument, 0, 't' },
+{"format", required_argument, 0, 'f' },
 {0, 0, 0, 0}
   };
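
The image-name check above is a plain POSIX extended regex. As an illustration only (the container-executor itself is C, not Java), the same pattern can be exercised with `java.util.regex`; the pattern string is copied from the patch, while the class name and sample inputs are assumptions:

```java
import java.util.regex.Pattern;

class DockerImageNameCheck {
  // Optional registry (host[:port]/), lower-case repository path,
  // optional tag: the same expression validate_docker_image_name compiles.
  private static final Pattern IMAGE_NAME = Pattern.compile(
      "^(([a-zA-Z0-9.-]+)(:[0-9]+)?/)?([a-z0-9_./-]+)(:[a-zA-Z0-9_.-]+)?$");

  static boolean isValid(String imageName) {
    return imageName != null && IMAGE_NAME.matcher(imageName).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValid("registry.example.com:5000/hadoop/base:3.0")); // true
    System.out.println(isValid("Bad Image Name")); // false: spaces, upper case
  }
}
```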

[49/50] [abbrv] hadoop git commit: YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda)

2017-08-11 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a81934/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index f6ada4f..5b529d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AccessType;
@@ -45,7 +44,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedContainerChangeRequest;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
@@ -60,6 +58,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
+import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.IOException;
@@ -69,6 +68,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 @Private
 @Evolving
@@ -163,31 +163,78 @@ public class ParentQueue extends AbstractCSQueue {
   writeLock.lock();
   // Validate
   float childCapacities = 0;
+  Resource minResDefaultLabel = Resources.createResource(0, 0);
   for (CSQueue queue : childQueues) {
 childCapacities += queue.getCapacity();
+Resources.addTo(minResDefaultLabel, queue.getQueueResourceQuotas()
+.getConfiguredMinResource());
+
+// If any child queue is using percentage based capacity model vs parent
+// queues' absolute configuration or vice versa, throw back an
+// exception.
+if (!queueName.equals("root") && getCapacity() != 0f
+&& !queue.getQueueResourceQuotas().getConfiguredMinResource()
+.equals(Resources.none())) {
+  throw new IllegalArgumentException("Parent queue '" + getQueueName()
+  + "' and child queue '" + queue.getQueueName()
+  + "' should use either percentage based capacity"
+  + " configuration or absolute resource together.");
+}
   }
+
   float delta = Math.abs(1.0f - childCapacities);  // crude way to check
   // allow capacities being set to 0, and enforce child 0 if parent is 0
-  if (((queueCapacities.getCapacity() > 0) && (delta > PRECISION)) || (
-  (queueCapacities.getCapacity() == 0) && (childCapacities > 0))) {
-throw new IllegalArgumentException(
-"Illegal" + " capacity of " + childCapacities
-+ " for children of queue " + queueName);
+  if ((minResDefaultLabel.equals(Resources.none())
+  && (queueCapacities.getCapacity() > 0) && (delta > PRECISION))
+  || ((queueCapacities.getCapacity() == 0) && (childCapacities > 0))) {
+throw new IllegalArgumentException("Illegal" + " capacity of "
++ childCapacities + " for children of queue " + queueName);
   }
   // check label capacities
   for (String nodeLabel : 

[39/50] [abbrv] hadoop git commit: MAPREDUCE-6923. Optimize MapReduce Shuffle I/O for small partitions. Contributed by Robert Schmidtke.

2017-08-11 Thread wangda
MAPREDUCE-6923. Optimize MapReduce Shuffle I/O for small partitions. 
Contributed by Robert Schmidtke.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac7d0604
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac7d0604
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac7d0604

Branch: refs/heads/YARN-5881
Commit: ac7d0604bc73c0925eff240ad9837e14719d57b7
Parents: b5c02f9
Author: Ravi Prakash 
Authored: Wed Aug 9 15:39:52 2017 -0700
Committer: Ravi Prakash 
Committed: Wed Aug 9 15:39:52 2017 -0700

--
 .../main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7d0604/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
index cb9b5e0..79045f9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
@@ -111,7 +111,10 @@ public class FadvisedFileRegion extends DefaultFileRegion {
 
 long trans = actualCount;
 int readSize;
-ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);
+ByteBuffer byteBuffer = ByteBuffer.allocate(
+Math.min(
+this.shuffleBufferSize,
+trans > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) trans));
 
 while(trans > 0L &&
(readSize = fileChannel.read(byteBuffer, this.position+position)) > 0) {
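
The one-line fix caps the transfer buffer at the bytes actually remaining instead of always allocating the full configured shuffle buffer. A hedged restatement of the sizing rule (the class name is illustrative):

```java
final class ShuffleBufferSizing {
  // Never allocate more than the bytes left to transfer, clamping the
  // long byte count safely to int before taking the minimum.
  static int bufferSize(int shuffleBufferSize, long bytesRemaining) {
    int remaining = bytesRemaining > Integer.MAX_VALUE
        ? Integer.MAX_VALUE
        : (int) bytesRemaining;
    return Math.min(shuffleBufferSize, remaining);
  }
}
```

For a partition of a few hundred bytes this avoids allocating the whole configured buffer on every transfer, which is where the small-partition win comes from.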


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: HDFS-12117. HttpFS does not seem to support SNAPSHOT related methods for WebHDFS REST Interface. Contributed by Wellington Chevreuil.

2017-08-11 Thread wangda
HDFS-12117. HttpFS does not seem to support SNAPSHOT related methods for 
WebHDFS REST Interface. Contributed by Wellington Chevreuil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a4bff02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a4bff02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a4bff02

Branch: refs/heads/YARN-5881
Commit: 8a4bff02c1534c6bf529726f2bbe414ac4c172e8
Parents: 9a3c237
Author: Wei-Chiu Chuang 
Authored: Tue Aug 8 23:58:53 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 8 23:58:53 2017 -0700

--
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  47 ++-
 .../hadoop/fs/http/server/FSOperations.java | 105 ++
 .../http/server/HttpFSParametersProvider.java   |  45 ++
 .../hadoop/fs/http/server/HttpFSServer.java |  36 +
 .../fs/http/client/BaseTestHttpFSWith.java  | 110 ++-
 .../hadoop/fs/http/server/TestHttpFSServer.java | 140 ++-
 6 files changed, 479 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a4bff02/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index d139100..1059a02 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -124,6 +124,8 @@ public class HttpFSFileSystem extends FileSystem
   public static final String POLICY_NAME_PARAM = "storagepolicy";
   public static final String OFFSET_PARAM = "offset";
   public static final String LENGTH_PARAM = "length";
+  public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
+  public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
@@ -144,6 +146,8 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
 
+  public static final String SNAPSHOT_JSON = "Path";
+
   public enum FILE_TYPE {
 FILE, DIRECTORY, SYMLINK;
 
@@ -229,7 +233,9 @@ public class HttpFSFileSystem extends FileSystem
 DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
 REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET), LISTSTATUS_BATCH(HTTP_GET),
 GETALLSTORAGEPOLICY(HTTP_GET), GETSTORAGEPOLICY(HTTP_GET),
-SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST);
+SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
+CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
+RENAMESNAPSHOT(HTTP_PUT);
 
 private String httpMethod;
 
@@ -1434,4 +1440,43 @@ public class HttpFSFileSystem extends FileSystem
 Operation.UNSETSTORAGEPOLICY.getMethod(), params, src, true);
 HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
+
+  @Override
+  public final Path createSnapshot(Path path, String snapshotName)
+  throws IOException {
+Map<String, String> params = new HashMap<String, String>();
+params.put(OP_PARAM, Operation.CREATESNAPSHOT.toString());
+if (snapshotName != null) {
+  params.put(SNAPSHOT_NAME_PARAM, snapshotName);
+}
+HttpURLConnection conn = getConnection(Operation.CREATESNAPSHOT.getMethod(),
+params, path, true);
+HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+return new Path((String) json.get(SNAPSHOT_JSON));
+  }
+
+  @Override
+  public void renameSnapshot(Path path, String snapshotOldName,
+ String snapshotNewName) throws IOException {
+Map<String, String> params = new HashMap<String, String>();
+params.put(OP_PARAM, Operation.RENAMESNAPSHOT.toString());
+params.put(SNAPSHOT_NAME_PARAM, snapshotNewName);
+params.put(OLD_SNAPSHOT_NAME_PARAM, snapshotOldName);
+HttpURLConnection conn = getConnection(Operation.RENAMESNAPSHOT.getMethod(),
+params, path, true);
+HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  @Override
+  public void deleteSnapshot(Path path, String snapshotName)
+  throws IOException {
+Map<String, String> params = new HashMap<String, String>();
+params.put(OP_PARAM, Operation.DELETESNAPSHOT.toString());
+ 
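
Once HttpFS exposes these operations, a client can drive them through the generic `FileSystem` API. A hedged usage sketch: the host, port, and `/data` path are assumptions, and the directory must already be snapshottable (`hdfs dfsadmin -allowSnapshot /data`):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // webhdfs:// pointing at an HttpFS endpoint; 14000 is its usual port.
    FileSystem fs = FileSystem.get(
        new URI("webhdfs://httpfs.example.com:14000"), conf);
    Path dir = new Path("/data");
    Path snap = fs.createSnapshot(dir, "s1"); // PUT op=CREATESNAPSHOT
    fs.renameSnapshot(dir, "s1", "s2");       // PUT op=RENAMESNAPSHOT
    fs.deleteSnapshot(dir, "s2");             // DELETE op=DELETESNAPSHOT
    System.out.println("Snapshot was created at " + snap);
  }
}
```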

[08/50] [abbrv] hadoop git commit: HDFS-12198. Document missing namenode metrics that were added recently. Contributed by Yiqun Lin.

2017-08-11 Thread wangda
HDFS-12198. Document missing namenode metrics that were added recently. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4eb7016
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4eb7016
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4eb7016

Branch: refs/heads/YARN-5881
Commit: a4eb7016cb20dfbc656b831c603136785e62fddc
Parents: 46b7054
Author: Akira Ajisaka 
Authored: Mon Aug 7 18:47:33 2017 +0900
Committer: Akira Ajisaka 
Committed: Mon Aug 7 18:47:33 2017 +0900

--
 .../hadoop-common/src/site/markdown/Metrics.md  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4eb7016/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 852a1e9..4543fac 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -145,6 +145,9 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname as additional information along with metrics.
 | `CreateSymlinkOps` | Total number of createSymlink operations |
 | `GetLinkTargetOps` | Total number of getLinkTarget operations |
 | `FilesInGetListingOps` | Total number of files and directories listed by directory listing operations |
+| `SuccessfulReReplications` | Total number of successful block re-replications |
+| `NumTimesReReplicationNotScheduled` | Total number of times that failed to schedule a block re-replication |
+| `TimeoutReReplications` | Total number of timed out block re-replications |
 | `AllowSnapshotOps` | Total number of allowSnapshot operations |
 | `DisallowSnapshotOps` | Total number of disallowSnapshot operations |
 | `CreateSnapshotOps` | Total number of createSnapshot operations |
@@ -157,8 +160,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname as additional information along with metrics.
 | `SyncsNumOps` | Total number of Journal syncs |
 | `SyncsAvgTime` | Average time of Journal syncs in milliseconds |
 | `TransactionsBatchedInSync` | Total number of Journal transactions batched in sync |
-| `BlockReportNumOps` | Total number of processing block reports from DataNode |
-| `BlockReportAvgTime` | Average time of processing block reports in milliseconds |
+| `StorageBlockReportNumOps` | Total number of processing block reports from individual storages in DataNode |
+| `StorageBlockReportAvgTime` | Average time of processing block reports in milliseconds |
 | `CacheReportNumOps` | Total number of processing cache reports from DataNode |
 | `CacheReportAvgTime` | Average time of processing cache reports in milliseconds |
 | `SafeModeTime` | The interval between FSNameSystem starts and the last time safemode leaves in milliseconds.  (sometimes not equal to the time in SafeMode, see [HDFS-5156](https://issues.apache.org/jira/browse/HDFS-5156)) |
@@ -176,6 +179,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname as additional information along with metrics.
 | `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
 | `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
 | `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
+| `ResourceCheckTime`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `StorageBlockReport`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 
 FSNamesystem
 





[36/50] [abbrv] hadoop git commit: YARN-6033. Add support for sections in container-executor configuration file. (Varun Vasudev via wangda)

2017-08-11 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec694145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
new file mode 100644
index 000..6ee0ab2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
@@ -0,0 +1,432 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+
+extern "C" {
+#include "util.h"
+#include "configuration.h"
+#include "configuration.c"
+}
+
+
+namespace ContainerExecutor {
+  class TestConfiguration : public ::testing::Test {
+  protected:
+virtual void SetUp() {
+  new_config_format_file = "test-configurations/configuration-1.cfg";
+  old_config_format_file = "test-configurations/old-config.cfg";
+  mixed_config_format_file = "test-configurations/configuration-2.cfg";
+  loadConfigurations();
+  return;
+}
+
+void loadConfigurations() {
+  int ret = 0;
+  ret = read_config(new_config_format_file.c_str(), &new_config_format);
+  ASSERT_EQ(0, ret);
+  ret = read_config(old_config_format_file.c_str(), &old_config_format);
+  ASSERT_EQ(0, ret);
+  ret = read_config(mixed_config_format_file.c_str(),
+&mixed_config_format);
+  ASSERT_EQ(0, ret);
+}
+
+virtual void TearDown() {
+  free_configuration(&new_config_format);
+  free_configuration(&old_config_format);
+  return;
+}
+
+std::string new_config_format_file;
+std::string old_config_format_file;
+std::string mixed_config_format_file;
+struct configuration new_config_format;
+struct configuration old_config_format;
+struct configuration mixed_config_format;
+  };
+
+
+  TEST_F(TestConfiguration, test_get_configuration_values_delimiter) {
+char **split_values;
+split_values = get_configuration_values_delimiter(NULL, "", &old_config_format, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", NULL,
+  &old_config_format, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "",
+  NULL, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "",
+  &old_config_format, NULL);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "abcd",
+  &old_config_format, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "",
+  &old_config_format, "%");
+ASSERT_STREQ("/var/run/yarn", split_values[0]);
+ASSERT_STREQ("/tmp/mydir", split_values[1]);
+ASSERT_EQ(NULL, split_values[2]);
+free(split_values);
+split_values = get_configuration_values_delimiter("allowed.system.users",
+  "", &old_config_format, "%");
+ASSERT_STREQ("nobody,daemon", split_values[0]);
+ASSERT_EQ(NULL, split_values[1]);
+free(split_values);
+  }
+
+  TEST_F(TestConfiguration, test_get_configuration_values) {
+char **split_values;
+split_values = get_configuration_values(NULL, "", &old_config_format);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values("yarn.local.dirs", NULL, &old_config_format);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values("yarn.local.dirs", "", NULL);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values("yarn.local.dirs", "abcd", &old_config_format);
+ASSERT_EQ(NULL, split_values);
+split_values = 

[02/50] [abbrv] hadoop git commit: HDFS-12251. Add document for StreamCapabilities. (Lei (Eddy) Xu)

2017-08-11 Thread wangda
HDFS-12251. Add document for StreamCapabilities. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe334178
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe334178
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe334178

Branch: refs/heads/YARN-5881
Commit: fe3341786a0d61f404127bf21d1afc85b2f21d38
Parents: a6fdeb8
Author: Lei Xu 
Authored: Fri Aug 4 11:21:58 2017 -0700
Committer: Lei Xu 
Committed: Fri Aug 4 11:21:58 2017 -0700

--
 .../src/site/markdown/filesystem/filesystem.md  | 24 
 .../src/site/markdown/HDFSErasureCoding.md  | 19 
 2 files changed, 43 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe334178/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index b5c..d7e57ce 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -1210,3 +1210,27 @@ try {
 It is notable that this is *not* done in the Hadoop codebase. This does not imply
 that robust loops are not recommended —more that the concurrency
 problems were not considered during the implementation of these loops.
+
+
+## interface `StreamCapabilities`
+
+The `StreamCapabilities` interface provides a way to programmatically query the
+capabilities that an `OutputStream` supports.
+
+```java
+public interface StreamCapabilities {
+  boolean hasCapability(String capability);
+}
+```
+
+### `boolean hasCapability(capability)`
+
+Return true if the `OutputStream` has the desired capability.
+
+The caller can query the capabilities of a stream using a string value.
+It currently supports querying:
+
+ * `StreamCapabilities.HFLUSH` ("*hflush*"): the capability to flush out the data
+ in client's buffer.
+ * `StreamCapabilities.HSYNC` ("*hsync*"): capability to flush out the data in
+ client's buffer and the disk device.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe334178/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 1c0a2de..88293ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -199,3 +199,22 @@ Below are the details about each command.
 *  `[-disablePolicy -policy ]`
 
  Disable an erasure coding policy.
+
+Limitations
+---
+
+Certain HDFS file write operations, i.e., `hflush`, `hsync` and `append`,
+are not supported on erasure coded files due to substantial technical
+challenges.
+
+* `append()` on an erasure coded file will throw `IOException`.
+* `hflush()` and `hsync()` on `DFSStripedOutputStream` are no-ops. Thus calling
+`hflush()` or `hsync()` on an erasure coded file cannot guarantee data
+being persistent.
+
+A client can use 
[`StreamCapabilities`](../hadoop-common/filesystem/filesystem.html#interface_StreamCapabilities)
+API to query whether an `OutputStream` supports `hflush()` and `hsync()`.
+If the client desires data persistence via `hflush()` and `hsync()`, the 
current
+remedy is creating such files as regular 3x replication files in a
+non-erasure-coded directory, or using `FSDataOutputStreamBuilder#replicate()`
+API to create 3x replication files in an erasure-coded directory.
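
A hedged usage sketch of the `hasCapability()` probe described above, with an assumed `DurableWriter` helper: check the stream before relying on `hsync()`, and fall back to another durability strategy when the capability is absent:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

class DurableWriter {
  static void writeDurably(FileSystem fs, Path p, byte[] data)
      throws IOException {
    try (FSDataOutputStream out = fs.create(p)) {
      out.write(data);
      if (out.hasCapability(StreamCapabilities.HSYNC)) {
        out.hsync(); // flushed through to the disk device
      } else {
        // e.g. an erasure-coded file, where hsync() would be a no-op;
        // durability then needs the fallback described in the text above.
      }
    }
  }
}
```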





[16/50] [abbrv] hadoop git commit: YARN-6920. Fix resource leak that happens during container re-initialization. (asuresh)

2017-08-11 Thread wangda
YARN-6920. Fix resource leak that happens during container re-initialization. 
(asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d3fd819
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d3fd819
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d3fd819

Branch: refs/heads/YARN-5881
Commit: 8d3fd81980275fa81e7a5539b1751f38a63b6911
Parents: c61f2c4
Author: Arun Suresh 
Authored: Mon Aug 7 18:59:25 2017 -0700
Committer: Arun Suresh 
Committed: Mon Aug 7 18:59:25 2017 -0700

--
 .../yarn/client/api/impl/TestNMClient.java  | 37 +---
 .../container/ContainerImpl.java|  4 +++
 .../scheduler/ContainerScheduler.java   |  4 +++
 .../containermanager/TestContainerManager.java  |  9 +
 4 files changed, 34 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3fd819/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
index 1034f7e..6bd0816 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
@@ -398,6 +398,8 @@ public class TestNMClient {
   "will be Rolled-back", Arrays.asList(new Integer[] {-1000}));
   testCommitContainer(container.getId(), true);
   testReInitializeContainer(container.getId(), clc, false);
+  testGetContainerStatus(container, i, ContainerState.RUNNING,
+  "will be Re-initialized", Arrays.asList(new Integer[] {-1000}));
   testCommitContainer(container.getId(), false);
 } else {
   testReInitializeContainer(container.getId(), clc, true);
@@ -449,24 +451,21 @@ public class TestNMClient {
   ContainerState state, String diagnostics, List<Integer> exitStatuses)
   throws YarnException, IOException {
 while (true) {
-  try {
-ContainerStatus status = nmClient.getContainerStatus(
-container.getId(), container.getNodeId());
-// NodeManager may still need some time to get the stable
-// container status
-if (status.getState() == state) {
-  assertEquals(container.getId(), status.getContainerId());
-  assertTrue("" + index + ": " + status.getDiagnostics(),
-  status.getDiagnostics().contains(diagnostics));
-  
-  assertTrue("Exit Statuses are supposed to be in: " + exitStatuses +
-  ", but the actual exit status code is: " + 
status.getExitStatus(),
-  exitStatuses.contains(status.getExitStatus()));
-  break;
-}
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-e.printStackTrace();
+  sleep(250);
+  ContainerStatus status = nmClient.getContainerStatus(
+  container.getId(), container.getNodeId());
+  // NodeManager may still need some time to get the stable
+  // container status
+  if (status.getState() == state) {
+assertEquals(container.getId(), status.getContainerId());
+assertTrue("" + index + ": " + status.getDiagnostics(),
+status.getDiagnostics().contains(diagnostics));
+
+assertTrue("Exit Statuses are supposed to be in: " + exitStatuses +
+", but the actual exit status code is: " +
+status.getExitStatus(),
+exitStatuses.contains(status.getExitStatus()));
+break;
   }
 }
   }
@@ -559,9 +558,7 @@ public class TestNMClient {
   ContainerLaunchContext clc, boolean autoCommit)
   throws YarnException, IOException {
 try {
-  sleep(250);
   nmClient.reInitializeContainer(containerId, clc, autoCommit);
-  sleep(250);
 } catch (YarnException e) {
   // NM container will only be in SCHEDULED state, so expect the increase
   // action to fail.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3fd819/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 

[34/50] [abbrv] hadoop git commit: HDFS-12157. Do fsyncDirectory(..) outside of FSDataset lock. Contributed by Vinayakumar B.

2017-08-11 Thread wangda
HDFS-12157. Do fsyncDirectory(..) outside of FSDataset lock. Contributed by 
Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69afa26f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69afa26f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69afa26f

Branch: refs/heads/YARN-5881
Commit: 69afa26f19adad4c630a307c274130eb8b697141
Parents: 1a18d5e
Author: Kihwal Lee 
Authored: Wed Aug 9 09:03:51 2017 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 9 09:03:51 2017 -0500

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 46 ++--
 1 file changed, 24 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69afa26f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 53e2fc6..16df709 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -991,8 +991,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 replicaInfo, smallBufferSize, conf);
 
 // Finalize the copied files
-newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo,
-false);
+newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
 try (AutoCloseableLock lock = datasetLock.acquire()) {
   // Increment numBlocks here as this block moved without knowing to BPS
   FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
@@ -1295,7 +1294,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   replicaInfo.bumpReplicaGS(newGS);
   // finalize the replica if RBW
   if (replicaInfo.getState() == ReplicaState.RBW) {
-finalizeReplica(b.getBlockPoolId(), replicaInfo, false);
+finalizeReplica(b.getBlockPoolId(), replicaInfo);
   }
   return replicaInfo;
 }
@@ -1625,23 +1624,39 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
   throws IOException {
+ReplicaInfo replicaInfo = null;
+ReplicaInfo finalizedReplicaInfo = null;
 try (AutoCloseableLock lock = datasetLock.acquire()) {
   if (Thread.interrupted()) {
 // Don't allow data modifications from interrupted threads
 throw new IOException("Cannot finalize block from Interrupted Thread");
   }
-  ReplicaInfo replicaInfo = getReplicaInfo(b);
+  replicaInfo = getReplicaInfo(b);
   if (replicaInfo.getState() == ReplicaState.FINALIZED) {
 // this is legal, when recovery happens on a file that has
 // been opened for append but never modified
 return;
   }
-  finalizeReplica(b.getBlockPoolId(), replicaInfo, fsyncDir);
+  finalizedReplicaInfo = finalizeReplica(b.getBlockPoolId(), replicaInfo);
+}
+/*
+ * Sync the directory after rename from tmp/rbw to Finalized if
+ * configured. Though rename should be atomic operation, sync on both
+ * dest and src directories are done because IOUtils.fsync() calls
+ * directory's channel sync, not the journal itself.
+ */
+if (fsyncDir && finalizedReplicaInfo instanceof FinalizedReplica
+&& replicaInfo instanceof LocalReplica) {
+  FinalizedReplica finalizedReplica =
+  (FinalizedReplica) finalizedReplicaInfo;
+  finalizedReplica.fsyncDirectory();
+  LocalReplica localReplica = (LocalReplica) replicaInfo;
+  localReplica.fsyncDirectory();
 }
   }
 
-  private ReplicaInfo finalizeReplica(String bpid,
-  ReplicaInfo replicaInfo, boolean fsyncDir) throws IOException {
+  private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
+  throws IOException {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
   ReplicaInfo newReplicaInfo = null;
   if (replicaInfo.getState() == ReplicaState.RUR &&
@@ -1656,19 +1671,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
 newReplicaInfo = v.addFinalizedBlock(
 bpid, replicaInfo, replicaInfo, replicaInfo.getBytesReserved());
-/*
- * Sync the directory after rename from tmp/rbw to Finalized if
- * configured. Though rename should be atomic operation, sync 

[46/50] [abbrv] hadoop git commit: HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb properties missing in classes. Contributed by John Zhuge.

2017-08-11 Thread wangda
HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb 
properties missing in classes.
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d964062f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d964062f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d964062f

Branch: refs/heads/YARN-5881
Commit: d964062f66c0772f4b1a029bfcdff921fbaaf91c
Parents: f13ca94
Author: Steve Loughran 
Authored: Fri Aug 11 10:18:17 2017 +0100
Committer: Steve Loughran 
Committed: Fri Aug 11 10:18:17 2017 +0100

--
 .../org/apache/hadoop/conf/TestCommonConfigurationFields.java  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d964062f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index da37e68..d0e0a35 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -103,6 +103,12 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
 xmlPrefixToSkipCompare.add("fs.s3n.");
 xmlPrefixToSkipCompare.add("s3native.");
 
+// WASB properties are in a different subtree.
+// - org.apache.hadoop.fs.azure.NativeAzureFileSystem
+xmlPrefixToSkipCompare.add("fs.wasb.impl");
+xmlPrefixToSkipCompare.add("fs.wasbs.impl");
+xmlPrefixToSkipCompare.add("fs.azure.");
+
 // ADL properties are in a different subtree
 // - org.apache.hadoop.hdfs.web.ADLConfKeys
 xmlPrefixToSkipCompare.add("adl.");





[30/50] [abbrv] hadoop git commit: HADOOP-14355. Update maven-war-plugin to 3.1.0.

2017-08-11 Thread wangda
HADOOP-14355. Update maven-war-plugin to 3.1.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07694fc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07694fc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07694fc6

Branch: refs/heads/YARN-5881
Commit: 07694fc65ae6d97a430a7dd67a6277e5795c321f
Parents: ebabc70
Author: Akira Ajisaka 
Authored: Wed Aug 9 13:20:03 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 9 13:20:03 2017 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07694fc6/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5aabdc7..8151016 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -126,7 +126,7 @@
 2.6
 2.4.3
 2.5
-<maven-war-plugin.version>2.4</maven-war-plugin.version>
+<maven-war-plugin.version>3.1.0</maven-war-plugin.version>
 2.3
 1.2
 
1.5





[31/50] [abbrv] hadoop git commit: HDFS-12182. BlockManager.metaSave does not distinguish between "under replicated" and "missing" blocks. Contributed by Wellington Chevreuil.

2017-08-11 Thread wangda
HDFS-12182. BlockManager.metaSave does not distinguish between "under 
replicated" and "missing" blocks. Contributed by Wellington Chevreuil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a3c2379
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a3c2379
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a3c2379

Branch: refs/heads/YARN-5881
Commit: 9a3c2379ef24cdca5153abf4b63fde1131ff8989
Parents: 07694fc
Author: Wei-Chiu Chuang 
Authored: Tue Aug 8 23:43:24 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 8 23:44:18 2017 -0700

--
 .../server/blockmanagement/BlockManager.java| 27 --
 .../blockmanagement/TestBlockManager.java   | 54 
 .../hdfs/server/namenode/TestMetaSave.java  |  2 +
 3 files changed, 79 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3c2379/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fc754a0..6129db8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -705,17 +705,36 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager.fetchDatanodes(live, dead, false);
 out.println("Live Datanodes: " + live.size());
 out.println("Dead Datanodes: " + dead.size());
+
 //
-// Dump contents of neededReconstruction
+// Need to iterate over all queues from neededReplications
+// except for the QUEUE_WITH_CORRUPT_BLOCKS)
 //
 synchronized (neededReconstruction) {
   out.println("Metasave: Blocks waiting for reconstruction: "
-  + neededReconstruction.size());
-  for (Block block : neededReconstruction) {
+  + neededReconstruction.getLowRedundancyBlockCount());
+  for (int i = 0; i < neededReconstruction.LEVEL; i++) {
+if (i != neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS) {
+  for (Iterator<BlockInfo> it = neededReconstruction.iterator(i);
+   it.hasNext();) {
+Block block = it.next();
+dumpBlockMeta(block, out);
+  }
+}
+  }
+  //
+  // Now prints corrupt blocks separately
+  //
+  out.println("Metasave: Blocks currently missing: " +
+  neededReconstruction.getCorruptBlockSize());
+  for (Iterator<BlockInfo> it = neededReconstruction.
+  iterator(neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS);
+   it.hasNext();) {
+Block block = it.next();
 dumpBlockMeta(block, out);
   }
 }
-
+
 // Dump any postponed over-replicated blocks
 out.println("Mis-replicated blocks that have been postponed:");
 for (Block block : postponedMisreplicatedBlocks) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a3c2379/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 6b1a979..42aeadf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1459,4 +1459,58 @@ public class TestBlockManager {
 }
   }
 
+  @Test
+  public void testMetaSaveMissingReplicas() throws Exception {
+List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
+List<DatanodeDescriptor> origNodes = getNodes(origStorages);
+BlockInfo block = makeBlockReplicasMissing(0, origNodes);
+File file = new File("test.log");
+PrintWriter out = new PrintWriter(file);
+bm.metaSave(out);
+out.flush();
+FileInputStream fstream = new FileInputStream(file);
+DataInputStream in = new DataInputStream(fstream);
+BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+StringBuffer buffer = new StringBuffer();
+String line;
+try {
+  while ((line = reader.readLine()) != null) {
+buffer.append(line);
+  }
+  String output = 

[43/50] [abbrv] hadoop git commit: HDFS-11957. Enable POSIX ACL inheritance by default. Contributed by John Zhuge.

2017-08-11 Thread wangda
HDFS-11957. Enable POSIX ACL inheritance by default. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312e57b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312e57b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312e57b9

Branch: refs/heads/YARN-5881
Commit: 312e57b95477ec95e6735f5721c646ad1df019f8
Parents: a8b7546
Author: John Zhuge 
Authored: Fri Jun 9 08:42:16 2017 -0700
Committer: John Zhuge 
Committed: Thu Aug 10 10:30:47 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java|  2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   |  2 +-
 .../src/site/markdown/HdfsPermissionsGuide.md |  2 +-
 .../test/java/org/apache/hadoop/cli/TestAclCLI.java   |  2 ++
 .../hadoop/hdfs/server/namenode/FSAclBaseTest.java|  8 
 .../hdfs/server/namenode/TestFSImageWithAcl.java  | 14 --
 6 files changed, 17 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dc9bf76..f4c383e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -269,7 +269,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY =
   "dfs.namenode.posix.acl.inheritance.enabled";
   public static final boolean
-  DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = false;
+  DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = true;
   public static final String  DFS_NAMENODE_XATTRS_ENABLED_KEY = 
"dfs.namenode.xattrs.enabled";
   public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
   public static final String  DFS_ADMIN = "dfs.cluster.administrators";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4942967..03becc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -459,7 +459,7 @@
 
   
   <name>dfs.namenode.posix.acl.inheritance.enabled</name>
-  <value>false</value>
+  <value>true</value>
   <description>
   Set to true to enable POSIX style ACL inheritance. When it is enabled
   and the create request comes from a compatible client, the NameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
index c502534..82b5cec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
@@ -322,7 +322,7 @@ Configuration Parameters
 
 *   `dfs.namenode.posix.acl.inheritance.enabled`
 
-Set to true to enable POSIX style ACL inheritance. Disabled by default.
+Set to true to enable POSIX style ACL inheritance. Enabled by default.
 When it is enabled and the create request comes from a compatible client,
 the NameNode will apply default ACLs from the parent directory to
 the create mode and ignore the client umask. If no default ACL is found,
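
To see the new default in action, give a parent directory a default ACL and create a child through a compatible client; with dfs.namenode.posix.acl.inheritance.enabled=true the child picks the entry up and the client umask is ignored. A sketch (path and group name are illustrative):

    import java.util.Collections;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.FsAction;

    static void demoAclInheritance(FileSystem fs) throws Exception {
      Path parent = new Path("/parent");
      fs.mkdirs(parent);
      AclEntry entry = new AclEntry.Builder()
          .setScope(AclEntryScope.DEFAULT)
          .setType(AclEntryType.GROUP)
          .setName("sales")
          .setPermission(FsAction.ALL)
          .build();
      fs.modifyAclEntries(parent, Collections.singletonList(entry));
      fs.create(new Path(parent, "child")).close();
      // the child now carries group:sales:rwx inherited from the default ACL
      System.out.println(fs.getAclStatus(new Path(parent, "child")));
    }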

http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 75111bb..9cf2180 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -34,6 +34,8 @@ public class TestAclCLI extends CLITestHelperDFS {
 
   protected void initConf() {
 

[47/50] [abbrv] hadoop git commit: HADOOP-10392. Use FileSystem#makeQualified(Path) instead of Path#makeQualified(FileSystem) (ajisakaa via aw)

2017-08-11 Thread wangda
HADOOP-10392. Use FileSystem#makeQualified(Path) instead of 
Path#makeQualified(FileSystem) (ajisakaa via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4222c971
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4222c971
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4222c971

Branch: refs/heads/YARN-5881
Commit: 4222c971080f2b150713727092c7197df58c88e5
Parents: d964062
Author: Allen Wittenauer 
Authored: Fri Aug 11 09:25:56 2017 -0700
Committer: Allen Wittenauer 
Committed: Fri Aug 11 09:25:56 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java |  4 +--
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |  4 +--
 .../java/org/apache/hadoop/io/SequenceFile.java |  2 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  6 ++---
 .../java/org/apache/hadoop/io/FileBench.java|  2 +-
 .../mapred/MiniMRClientClusterFactory.java  |  4 +--
 .../mapred/TestCombineFileInputFormat.java  |  6 ++---
 .../TestCombineSequenceFileInputFormat.java |  7 +++--
 .../mapred/TestCombineTextInputFormat.java  |  7 +++--
 .../mapred/TestConcatenatedCompressedInput.java |  6 ++---
 .../org/apache/hadoop/mapred/TestMapRed.java|  4 +--
 .../hadoop/mapred/TestMiniMRChildTask.java  |  4 +--
 .../hadoop/mapred/TestTextInputFormat.java  |  8 +++---
 .../TestWrappedRecordReaderClassloader.java |  4 +--
 .../lib/join/TestWrappedRRClassloader.java  |  4 +--
 .../mapreduce/util/MRAsyncDiskService.java  |  2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  4 +--
 .../v2/TestMRJobsWithHistoryService.java|  4 +--
 .../org/apache/hadoop/tools/HadoopArchives.java |  2 +-
 .../apache/hadoop/mapred/gridmix/Gridmix.java   |  2 +-
 .../hadoop/mapred/gridmix/PseudoLocalFs.java|  8 +-
 .../hadoop/mapred/gridmix/TestFilePool.java |  4 +--
 .../hadoop/mapred/gridmix/TestFileQueue.java|  8 +++---
 .../mapred/gridmix/TestPseudoLocalFs.java   |  2 +-
 .../hadoop/mapred/gridmix/TestUserResolve.java  |  4 +--
 .../hadoop/fs/swift/util/SwiftTestUtils.java|  2 +-
 .../fs/swift/SwiftFileSystemBaseTest.java   |  2 +-
 .../TestSwiftFileSystemPartitionedUploads.java  |  4 +--
 .../hadoop/tools/rumen/TestHistograms.java  |  6 ++---
 .../org/apache/hadoop/streaming/StreamJob.java  | 27 ++--
 30 files changed, 78 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4222c971/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index eb8a5c3..72b9615 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -295,8 +295,8 @@ public class FileUtil {
 Path dst)
 throws IOException {
 if (srcFS == dstFS) {
-  String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
-  String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
+  String srcq = srcFS.makeQualified(src).toString() + Path.SEPARATOR;
+  String dstq = dstFS.makeQualified(dst).toString() + Path.SEPARATOR;
   if (dstq.startsWith(srcq)) {
 if (srcq.length() == dstq.length()) {
   throw new IOException("Cannot copy " + src + " to itself.");
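
The substitution this patch applies across the codebase, in isolation (a sketch; the helper name is illustrative):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Both forms resolve a path against the filesystem's URI and working
    // directory; the Path-side overload is deprecated, the FileSystem-side
    // overload is the replacement used above.
    static Path qualify(FileSystem fs, Path p) {
      // return p.makeQualified(fs);   // deprecated Path#makeQualified(FileSystem)
      return fs.makeQualified(p);      // preferred FileSystem#makeQualified(Path)
    }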

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4222c971/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 4c1236b..644cf4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -505,7 +505,7 @@ public class FTPFileSystem extends FileSystem {
   long modTime = -1; // Modification time of root dir not known.
   Path root = new Path("/");
   return new FileStatus(length, isDir, blockReplication, blockSize,
-  modTime, root.makeQualified(this));
+  modTime, this.makeQualified(root));
 }
 String pathName = parentPath.toUri().getPath();
 FTPFile[] ftpFiles = client.listFiles(pathName);
@@ -546,7 

[41/50] [abbrv] hadoop git commit: HADOOP-14183. Remove service loader config file for wasb fs. Contributed by Esfandiar Manii.

2017-08-11 Thread wangda
HADOOP-14183. Remove service loader config file for wasb fs.
Contributed by Esfandiar Manii.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54356b1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54356b1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54356b1e

Branch: refs/heads/YARN-5881
Commit: 54356b1e8366a23fff1bb45601efffc743306efc
Parents: 8d953c2
Author: Steve Loughran 
Authored: Thu Aug 10 16:46:33 2017 +0100
Committer: Steve Loughran 
Committed: Thu Aug 10 16:46:33 2017 +0100

--
 .../src/main/resources/core-default.xml| 12 
 .../services/org.apache.hadoop.fs.FileSystem   | 17 -
 2 files changed, 12 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54356b1e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 593fd85..e6b6919 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1322,6 +1322,18 @@
 </property>
 
 <property>
+  <name>fs.wasb.impl</name>
+  <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem</value>
+  <description>The implementation class of the Native Azure Filesystem</description>
+</property>
+
+<property>
+  <name>fs.wasbs.impl</name>
+  <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure</value>
+  <description>The implementation class of the Secure Native Azure Filesystem</description>
+</property>
+
+<property>
   <name>fs.azure.secure.mode</name>
   <value>false</value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54356b1e/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 9f4922b..000
--- 
a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.azure.NativeAzureFileSystem
-org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure
\ No newline at end of file
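
With the ServiceLoader file gone, the wasb and wasbs schemes resolve through the two new core-default.xml keys, so the binding can be inspected or overridden like any other fs.<scheme>.impl entry. A sketch using the Configuration API; the values shown mirror the new defaults:

    import org.apache.hadoop.conf.Configuration;

    static Configuration wasbBindings() {
      Configuration conf = new Configuration();
      // setIfUnset keeps any site-level override in place
      conf.setIfUnset("fs.wasb.impl",
          "org.apache.hadoop.fs.azure.NativeAzureFileSystem");
      conf.setIfUnset("fs.wasbs.impl",
          "org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure");
      return conf;
    }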





[40/50] [abbrv] hadoop git commit: YARN-6631. Refactor loader.js in new Yarn UI. Contributed by Akhil P B.

2017-08-11 Thread wangda
YARN-6631. Refactor loader.js in new Yarn UI. Contributed by Akhil P B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d953c23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d953c23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d953c23

Branch: refs/heads/YARN-5881
Commit: 8d953c2359c5b12cf5b1f3c14be3ff5bb74242d0
Parents: ac7d060
Author: Sunil G 
Authored: Thu Aug 10 11:53:26 2017 +0530
Committer: Sunil G 
Committed: Thu Aug 10 11:53:26 2017 +0530

--
 .../src/main/webapp/app/initializers/loader.js  | 42 +---
 1 file changed, 19 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d953c23/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index aa8fb07..55f6e1b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -20,25 +20,27 @@
 
 import Ember from 'ember';
 
-function getTimeLineURL() {
-  return '/conf?name=yarn.timeline-service.webapp.address';
+function getTimeLineURL(rmhost) {
+  var url = window.location.protocol + '//' +
+(ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + 
rmhost;
+
+  url += '/conf?name=yarn.timeline-service.webapp.address';
+  Ember.Logger.log("Get Timeline Address URL: " + url);
+  return url;
 }
 
 function updateConfigs(application) {
   var hostname = window.location.hostname;
-  var rmhost = hostname +
-(window.location.port ? ':' + window.location.port: '');
-
-  Ember.Logger.log("RM Address:" + rmhost);
+  var rmhost = hostname + (window.location.port ? ':' + window.location.port: 
'');
 
   if(!ENV.hosts.rmWebAddress) {
-ENV = {
-   hosts: {
-  rmWebAddress: rmhost,
-},
-};
+ENV.hosts.rmWebAddress = rmhost;
+  } else {
+rmhost = ENV.hosts.rmWebAddress;
   }
 
+  Ember.Logger.log("RM Address: " + rmhost);
+
   if(!ENV.hosts.timelineWebAddress) {
 var timelinehost = "";
 $.ajax({
@@ -46,7 +48,7 @@ function updateConfigs(application) {
   dataType: 'json',
   async: true,
   context: this,
-  url: getTimeLineURL(),
+  url: getTimeLineURL(rmhost),
   success: function(data) {
 timelinehost = data.property.value;
 ENV.hosts.timelineWebAddress = timelinehost;
@@ -54,24 +56,18 @@ function updateConfigs(application) {
 var address = timelinehost.split(":")[0];
 var port = timelinehost.split(":")[1];
 
-Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
+Ember.Logger.log("Timeline Address from RM: " + timelinehost);
 
 if(address === "0.0.0.0" || address === "localhost") {
   var updatedAddress =  hostname + ":" + port;
-
-  /* Timeline v2 is not supporting CORS, so make as default*/
-  ENV = {
- hosts: {
-rmWebAddress: rmhost,
-timelineWebAddress: updatedAddress,
-  },
-  };
-  Ember.Logger.log("Timeline Updated Address:" + updatedAddress);
+  ENV.hosts.timelineWebAddress = updatedAddress;
+  Ember.Logger.log("Timeline Updated Address: " + updatedAddress);
 }
 application.advanceReadiness();
-  },
+  }
 });
   } else {
+Ember.Logger.log("Timeline Address: " + ENV.hosts.timelineWebAddress);
 application.advanceReadiness();
   }
 }





[25/50] [abbrv] hadoop git commit: HADOOP-14715. TestWasbRemoteCallHelper failing. Contributed by Esfandiar Manii.

2017-08-11 Thread wangda
HADOOP-14715. TestWasbRemoteCallHelper failing.
Contributed by Esfandiar Manii.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4e1aa05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4e1aa05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4e1aa05

Branch: refs/heads/YARN-5881
Commit: f4e1aa0508cadcf9d4ecc4053d8c1cf6ddd6c31b
Parents: 71b8dda
Author: Steve Loughran 
Authored: Tue Aug 8 23:37:47 2017 +0100
Committer: Steve Loughran 
Committed: Tue Aug 8 23:37:47 2017 +0100

--
 .../apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java |  7 +--
 .../hadoop-azure/src/test/resources/azure-test.xml   | 11 +++
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4e1aa05/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index 393dcfd..8aad9e9 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -282,6 +282,8 @@ public class TestWasbRemoteCallHelper
   @Test
   public void testWhenOneInstanceIsDown() throws Throwable {
 
+boolean isAuthorizationCachingEnabled = 
fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, 
false);
+
 // set up mocks
 HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
 HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class);
@@ -356,8 +358,9 @@ public class TestWasbRemoteCallHelper
 
 performop(mockHttpClient);
 
-Mockito.verify(mockHttpClient, times(2)).execute(Mockito.argThat(new 
HttpGetForServiceLocal()));
-Mockito.verify(mockHttpClient, times(2)).execute(Mockito.argThat(new 
HttpGetForService2()));
+int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 1 : 2;
+Mockito.verify(mockHttpClient, 
times(expectedNumberOfInvocations)).execute(Mockito.argThat(new 
HttpGetForServiceLocal()));
+Mockito.verify(mockHttpClient, 
times(expectedNumberOfInvocations)).execute(Mockito.argThat(new 
HttpGetForService2()));
   }
 
   @Test

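
The verification change in isolation: when authorization caching is enabled, the second identical lookup is served from the cache and never reaches the HTTP client, so the expected invocation count is derived from the flag. A self-contained Mockito sketch (the mocked type is illustrative, not the test's real HttpClient):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;
    import java.util.List;

    static void verifyLookupCount(boolean cachingEnabled) {
      @SuppressWarnings("unchecked")
      List<String> remote = mock(List.class);
      remote.get(0);
      if (!cachingEnabled) {
        remote.get(0);  // a cache miss triggers a second remote call
      }
      verify(remote, times(cachingEnabled ? 1 : 2)).get(0);
    }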
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4e1aa05/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
--
diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml 
b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
index 8c88743..8cea256 100644
--- a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
+++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
@@ -29,10 +29,13 @@
 
   -->
 
-  <property>
-    <name>fs.azure.secure.mode</name>
-    <value>true</value>
-  </property>
+  
+  
 
   
    <name>fs.azure.user.agent.prefix</name>





[24/50] [abbrv] hadoop git commit: HDFS-10326. Disable setting tcp socket send/receive buffers for write pipelines. Contributed by Daryn Sharp.

2017-08-11 Thread wangda
HDFS-10326. Disable setting tcp socket send/receive buffers for write 
pipelines. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71b8dda4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71b8dda4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71b8dda4

Branch: refs/heads/YARN-5881
Commit: 71b8dda4f6ff6006410f3a9fe7717aa096004b1b
Parents: e0c2414
Author: Haohui Mai 
Authored: Tue Aug 8 14:58:11 2017 -0700
Committer: Haohui Mai 
Committed: Tue Aug 8 14:58:16 2017 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  4 ++--
 .../src/main/resources/hdfs-default.xml |  9 ++---
 .../hadoop/hdfs/TestDFSClientSocketSize.java| 20 
 3 files changed, 20 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b8dda4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index b636121..2681f12 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -48,8 +48,8 @@ public final class HdfsConstants {
   public static final byte COLD_STORAGE_POLICY_ID = 2;
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
-  // TODO should be conf injected?
-  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
+  public static final int DEFAULT_DATA_SOCKET_SIZE = 0;
+
   /**
* A special path component contained in the path for a snapshot file/dir
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b8dda4/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 8bf2b8c..bb62359 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2545,13 +2545,14 @@
 
 <property>
   <name>dfs.client.socket.send.buffer.size</name>
-  <value>131072</value>
+  <value>0</value>
   <description>
     Socket send buffer size for a write pipeline in DFSClient side.
     This may affect TCP connection throughput.
     If it is set to zero or negative value,
     no buffer size will be set explicitly,
     thus enable tcp auto-tuning on some system.
+    The default value is 0.
   </description>
 </property>
 
@@ -3025,23 +3026,25 @@
 
 <property>
   <name>dfs.datanode.transfer.socket.send.buffer.size</name>
-  <value>131072</value>
+  <value>0</value>
   <description>
     Socket send buffer size for DataXceiver (mirroring packets to downstream
     in pipeline). This may affect TCP connection throughput.
     If it is set to zero or negative value, no buffer size will be set
     explicitly, thus enable tcp auto-tuning on some system.
+    The default value is 0.
   </description>
 </property>
 
 <property>
   <name>dfs.datanode.transfer.socket.recv.buffer.size</name>
-  <value>131072</value>
+  <value>0</value>
   <description>
     Socket receive buffer size for DataXceiver (receiving packets from client
     during block writing). This may affect TCP connection throughput.
     If it is set to zero or negative value, no buffer size will be set
     explicitly, thus enable tcp auto-tuning on some system.
+    The default value is 0.
   </description>
 </property>
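
Deployments that prefer the old fixed buffer over TCP auto-tuning can restore it per client. A sketch (128 KB was the previous default):

    import org.apache.hadoop.conf.Configuration;

    static Configuration fixedSendBuffer() {
      Configuration conf = new Configuration();
      // 0 (the new default) defers to OS auto-tuning; a positive value is
      // set on the socket explicitly, as before this change
      conf.setInt("dfs.client.socket.send.buffer.size", 128 * 1024);
      return conf;
    }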

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b8dda4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
index fa12f34..40cd676 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
@@ -30,7 +30,6 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.Socket;
 
-import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY;
 import static org.junit.Assert.assertTrue;
 
@@ -42,15 +41,16 @@ public class TestDFSClientSocketSize {
   }
 
   /**

[45/50] [abbrv] hadoop git commit: HDFS-12287. Remove a no-longer applicable TODO comment in DatanodeManager. Contributed by Chen Liang.

2017-08-11 Thread wangda
HDFS-12287. Remove a no-longer applicable TODO comment in DatanodeManager. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f13ca949
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f13ca949
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f13ca949

Branch: refs/heads/YARN-5881
Commit: f13ca94954072c9b898b142a5ff86f2c1f3ee55a
Parents: a32e013
Author: Yiqun Lin 
Authored: Fri Aug 11 14:13:45 2017 +0800
Committer: Yiqun Lin 
Committed: Fri Aug 11 14:13:45 2017 +0800

--
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f13ca949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index d705fec..78783ca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -212,8 +212,6 @@ public class DatanodeManager {
 this.namesystem = namesystem;
 this.blockManager = blockManager;
 
-// TODO: Enables DFSNetworkTopology by default after more stress
-// testings/validations.
 this.useDfsNetworkTopology = conf.getBoolean(
 DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY,
 DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT);





[29/50] [abbrv] hadoop git commit: HADOOP-14628. Upgrade maven enforcer plugin to 3.0.0-M1.

2017-08-11 Thread wangda
HADOOP-14628. Upgrade maven enforcer plugin to 3.0.0-M1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebabc709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebabc709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebabc709

Branch: refs/heads/YARN-5881
Commit: ebabc7094c6bcbd9063744331c69e3fba615fa62
Parents: a53b8b6
Author: Akira Ajisaka 
Authored: Wed Aug 9 13:16:31 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 9 13:18:16 2017 +0900

--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml  | 1 -
 hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml | 1 -
 pom.xml   | 2 +-
 3 files changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebabc709/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml 
b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index e495a69..2f31fa6 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -46,7 +46,6 @@
   
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-enforcer-plugin</artifactId>
-         <version>1.4</version>
          <dependencies>
            <dependency>
              <groupId>org.codehaus.mojo</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebabc709/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml 
b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 68d1f5b..0e23db9 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@ -50,7 +50,6 @@
   
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-enforcer-plugin</artifactId>
-         <version>1.4</version>
          <dependencies>
            <dependency>
              <groupId>org.codehaus.mojo</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebabc709/pom.xml
--
diff --git a/pom.xml b/pom.xml
index d82cd9f..22a4b59 100644
--- a/pom.xml
+++ b/pom.xml
@@ -97,7 +97,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 1.7
 2.4
 2.10
-    <maven-enforcer-plugin.version>1.4.1</maven-enforcer-plugin.version>
+    <maven-enforcer-plugin.version>3.0.0-M1</maven-enforcer-plugin.version>
 2.10.4
 1.5
 
1.5





[33/50] [abbrv] hadoop git commit: YARN-6515. Fix warnings from Spotbugs in hadoop-yarn-server-nodemanager. Contributed by Naganarasimha G R.

2017-08-11 Thread wangda
YARN-6515. Fix warnings from Spotbugs in hadoop-yarn-server-nodemanager. 
Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a18d5e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a18d5e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a18d5e5

Branch: refs/heads/YARN-5881
Commit: 1a18d5e514d13aa3a88e9b6089394a27296d6bc3
Parents: 8a4bff0
Author: Akira Ajisaka 
Authored: Wed Aug 9 21:56:34 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 9 21:56:43 2017 +0900

--
 .../server/nodemanager/NodeStatusUpdaterImpl.java| 11 +--
 .../localizer/ContainerLocalizer.java| 15 ---
 .../containermanager/monitor/ContainerMetrics.java   |  2 +-
 3 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a18d5e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 00073d8..b5ec383 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -639,7 +639,6 @@ public class NodeStatusUpdaterImpl extends AbstractService 
implements
   public void removeOrTrackCompletedContainersFromContext(
      List<ContainerId> containerIds) throws IOException {
     Set<ContainerId> removedContainers = new HashSet<ContainerId>();
-    Set<ContainerId> removedNullContainers = new HashSet<ContainerId>();
 
     pendingContainersToRemove.addAll(containerIds);
     Iterator<ContainerId> iter = pendingContainersToRemove.iterator();
@@ -649,7 +648,6 @@ public class NodeStatusUpdaterImpl extends AbstractService 
implements
   Container nmContainer = context.getContainers().get(containerId);
   if (nmContainer == null) {
 iter.remove();
-removedNullContainers.add(containerId);
   } else if (nmContainer.getContainerState().equals(
 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.DONE))
 {
 context.getContainers().remove(containerId);
@@ -712,11 +710,12 @@ public class NodeStatusUpdaterImpl extends 
AbstractService implements
   public void removeVeryOldStoppedContainersFromCache() {
 synchronized (recentlyStoppedContainers) {
   long currentTime = System.currentTimeMillis();
-      Iterator<ContainerId> i =
-          recentlyStoppedContainers.keySet().iterator();
+      Iterator<Entry<ContainerId, Long>> i =
+          recentlyStoppedContainers.entrySet().iterator();
   while (i.hasNext()) {
-ContainerId cid = i.next();
-if (recentlyStoppedContainers.get(cid) < currentTime) {
+        Entry<ContainerId, Long> mapEntry = i.next();
+ContainerId cid = mapEntry.getKey();
+if (mapEntry.getValue() < currentTime) {
   if (!context.getContainers().containsKey(cid)) {
 i.remove();
 try {

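
The Spotbugs pattern being fixed here (WMI_WRONG_MAP_ITERATOR), in a self-contained form: iterate entrySet() once instead of keySet() plus a get() per key, and remove through the iterator so the map is never modified behind its own iteration:

    import java.util.Iterator;
    import java.util.Map;

    static void expireEntries(Map<String, Long> stopped, long now) {
      Iterator<Map.Entry<String, Long>> it = stopped.entrySet().iterator();
      while (it.hasNext()) {
        Map.Entry<String, Long> e = it.next();
        if (e.getValue() < now) {  // value read without a second map lookup
          it.remove();             // safe removal during iteration
        }
      }
    }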
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a18d5e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 8a46491..bb4b7f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -17,6 +17,8 @@
 */
 package 

[10/50] [abbrv] hadoop git commit: YARN-6957. Moving logging APIs over to slf4j in hadoop-yarn-server-sharedcachemanager. Contributed by Yeliang Cang.

2017-08-11 Thread wangda
YARN-6957. Moving logging APIs over to slf4j in 
hadoop-yarn-server-sharedcachemanager. Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0fbf179
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0fbf179
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0fbf179

Branch: refs/heads/YARN-5881
Commit: b0fbf1796585900a37dc4d6a271c5b5b32e9a9da
Parents: 839e077
Author: Akira Ajisaka 
Authored: Mon Aug 7 19:25:40 2017 +0900
Committer: Akira Ajisaka 
Committed: Mon Aug 7 19:25:40 2017 +0900

--
 .../yarn/server/sharedcachemanager/CleanerService.java  | 7 ---
 .../hadoop/yarn/server/sharedcachemanager/CleanerTask.java  | 7 ---
 .../server/sharedcachemanager/ClientProtocolService.java| 7 ---
 .../server/sharedcachemanager/SCMAdminProtocolService.java  | 8 
 .../yarn/server/sharedcachemanager/SharedCacheManager.java  | 9 +
 .../server/sharedcachemanager/metrics/CleanerMetrics.java   | 7 ---
 .../server/sharedcachemanager/metrics/ClientSCMMetrics.java | 7 ---
 .../metrics/SharedCacheUploaderMetrics.java | 8 
 .../server/sharedcachemanager/store/InMemorySCMStore.java   | 7 ---
 .../yarn/server/sharedcachemanager/webapp/SCMWebServer.java | 7 ---
 10 files changed, 41 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0fbf179/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java
index 60fc3e5..bcdc46b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java
@@ -26,8 +26,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -43,6 +41,8 @@ import 
org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics;
 import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The cleaner service that maintains the shared cache area, and cleans up 
stale
@@ -57,7 +57,8 @@ public class CleanerService extends CompositeService {
*/
   public static final String GLOBAL_CLEANER_PID = ".cleaner_pid";
 
-  private static final Log LOG = LogFactory.getLog(CleanerService.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(CleanerService.class);
 
   private Configuration conf;
   private CleanerMetrics metrics;

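
The migration is mechanical, but slf4j also brings parameterized messages that skip string formatting when a level is disabled. A small sketch (class name illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CacheCleaner {
      private static final Logger LOG =
          LoggerFactory.getLogger(CacheCleaner.class);

      void clean(String resource, long bytes) {
        // arguments are formatted only if INFO is enabled
        LOG.info("Evicting {} ({} bytes)", resource, bytes);
      }
    }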
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0fbf179/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java
index a7fdcbd..3e0a62b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.sharedcachemanager;
 import java.io.IOException;
 import 

[14/50] [abbrv] hadoop git commit: HDFS-12264. DataNode uses a deprecated method IoUtils#cleanup. Contributed by Ajay Yadav.

2017-08-11 Thread wangda
HDFS-12264. DataNode uses a deprecated method IoUtils#cleanup. Contributed by 
Ajay Yadav.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc206806
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc206806
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc206806

Branch: refs/heads/YARN-5881
Commit: bc206806dadc5dc85f182d98d859307cfb33172b
Parents: adb84f3
Author: Arpit Agarwal 
Authored: Mon Aug 7 15:05:10 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 7 15:05:10 2017 -0700

--
 .../hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc206806/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 1574431..46ea1c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -293,7 +293,7 @@ public class IOUtils {
*/
   public static void closeStream(java.io.Closeable stream) {
 if (stream != null) {
-  cleanup(null, stream);
+  cleanupWithLogger(null, stream);
 }
   }
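
Usage of the replacement in isolation (a sketch): cleanupWithLogger takes an slf4j Logger, where the deprecated cleanup took a commons-logging Log, and closes any number of Closeables, logging rather than propagating failures:

    import java.io.Closeable;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class Example {
      private static final Logger LOG = LoggerFactory.getLogger(Example.class);

      static void closeQuietly(Closeable... streams) {
        IOUtils.cleanupWithLogger(LOG, streams);  // a null Logger is also accepted
      }
    }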
   





[18/50] [abbrv] hadoop git commit: YARN-6961. Remove commons-logging dependency from hadoop-yarn-server-applicationhistoryservice module. Contributed by Yeliang Cang.

2017-08-11 Thread wangda
YARN-6961. Remove commons-logging dependency from 
hadoop-yarn-server-applicationhistoryservice module. Contributed by Yeliang 
Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98912950
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98912950
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98912950

Branch: refs/heads/YARN-5881
Commit: 98912950b6167523f6238a90ce69da817db91308
Parents: 55a181f
Author: Akira Ajisaka 
Authored: Tue Aug 8 19:38:58 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Aug 8 19:38:58 2017 +0900

--
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98912950/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index d732af4..cace493 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -131,10 +131,6 @@
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-    </dependency>
 
 
 





[13/50] [abbrv] hadoop git commit: YARN-4161. Capacity Scheduler : Assign single or multiple containers per heart beat driven by configuration. (Wei Yan via wangda)

2017-08-11 Thread wangda
YARN-4161. Capacity Scheduler : Assign single or multiple containers per heart 
beat driven by configuration. (Wei Yan via wangda)

Change-Id: Ic441ae4e0bf72e7232411eb54243ec143d5fd0d3


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adb84f34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adb84f34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adb84f34

Branch: refs/heads/YARN-5881
Commit: adb84f34db7e1cdcd72aa8e3deb464c48da9e353
Parents: a3a9c97
Author: Wangda Tan 
Authored: Mon Aug 7 11:32:12 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 7 11:32:21 2017 -0700

--
 .../scheduler/capacity/CapacityScheduler.java   |  53 -
 .../CapacitySchedulerConfiguration.java |  23 ++
 .../capacity/TestCapacityScheduler.java | 232 ++-
 3 files changed, 289 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb84f34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 2ccaf63..3286982 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -94,11 +94,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueInvalidExcep
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedContainerChangeRequest;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
 
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
@@ -163,6 +161,9 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private boolean assignMultipleEnabled;
+
+  private int maxAssignPerHeartbeat;
 
   @Override
   public void setConf(Configuration conf) {
@@ -308,6 +309,9 @@ public class CapacityScheduler extends
   asyncScheduleInterval = this.conf.getLong(ASYNC_SCHEDULER_INTERVAL,
   DEFAULT_ASYNC_SCHEDULER_INTERVAL);
 
+  this.assignMultipleEnabled = this.conf.getAssignMultipleEnabled();
+  this.maxAssignPerHeartbeat = this.conf.getMaxAssignPerHeartbeat();
+
   // number of threads for async scheduling
   int maxAsyncSchedulingThreads = this.conf.getInt(
   
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
@@ -1109,17 +1113,29 @@ public class CapacityScheduler extends
   .getAssignmentInformation().getReserved());
   }
 
-  private boolean canAllocateMore(CSAssignment assignment, int offswitchCount) 
{
-if (null != assignment && Resources.greaterThan(getResourceCalculator(),
-getClusterResource(), assignment.getResource(), Resources.none())
-&& offswitchCount < offswitchPerHeartbeatLimit) {
-  // And it should not be a reserved container
-  if (assignment.getAssignmentInformation().getNumReservations() == 0) {
-return true;
-  }
+  private boolean canAllocateMore(CSAssignment assignment, int offswitchCount,
+  int assignedContainers) {
+// Current assignment shouldn't be empty
+if (assignment == null
+|| Resources.equals(assignment.getResource(), Resources.none())) {
+  return false;
 }
 
-return false;
+// offswitch assignment should be under threshold
+if (offswitchCount >= offswitchPerHeartbeatLimit) {
+  return false;
+}
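
A sketch of enabling the new behavior. The two property names below are the ones this patch adds to CapacitySchedulerConfiguration; treat them as an assumption and verify against the committed constants:

    import org.apache.hadoop.conf.Configuration;

    static Configuration multipleAssignments() {
      Configuration conf = new Configuration();
      // assumed key: allow more than one container assignment per heartbeat
      conf.setBoolean(
          "yarn.scheduler.capacity.per-node-heartbeat.multiple-assignments-enabled",
          true);
      // assumed key: cap assignments per heartbeat (negative = unlimited)
      conf.setInt(
          "yarn.scheduler.capacity.per-node-heartbeat.maximum-container-assignments",
          10);
      return conf;
    }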

[06/50] [abbrv] hadoop git commit: HADOOP-14722. Azure: BlockBlobInputStream position incorrect after seek. Contributed by Thomas Marquardt

2017-08-11 Thread wangda
HADOOP-14722. Azure: BlockBlobInputStream position incorrect after seek.
Contributed by Thomas Marquardt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d91b7a84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d91b7a84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d91b7a84

Branch: refs/heads/YARN-5881
Commit: d91b7a8451489f97bdde928cea774764155cfe03
Parents: 024c3ec
Author: Steve Loughran 
Authored: Sun Aug 6 20:19:23 2017 +0100
Committer: Steve Loughran 
Committed: Sun Aug 6 20:19:23 2017 +0100

--
 .../hadoop/fs/azure/BlockBlobInputStream.java   | 91 +++-
 .../fs/azure/TestBlockBlobInputStream.java  | 85 --
 2 files changed, 150 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d91b7a84/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
index 5542415..c37b2be 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
@@ -43,11 +43,16 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
   private InputStream blobInputStream = null;
   private int minimumReadSizeInBytes = 0;
   private long streamPositionAfterLastRead = -1;
+  // position of next network read within stream
   private long streamPosition = 0;
+  // length of stream
   private long streamLength = 0;
   private boolean closed = false;
+  // internal buffer, re-used for performance optimization
   private byte[] streamBuffer;
+  // zero-based offset within streamBuffer of current read position
   private int streamBufferPosition;
+  // length of data written to streamBuffer, streamBuffer may be larger
   private int streamBufferLength;
 
   /**
@@ -82,6 +87,16 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
   }
 
   /**
+   * Reset the internal stream buffer but do not release the memory.
+   * The buffer can be reused to avoid frequent memory allocations of
+   * a large buffer.
+   */
+  private void resetStreamBuffer() {
+streamBufferPosition = 0;
+streamBufferLength = 0;
+  }
+
+  /**
* Gets the read position of the stream.
* @return the zero-based byte offset of the read position.
* @throws IOException IO failure
@@ -89,7 +104,9 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
   @Override
   public synchronized long getPos() throws IOException {
 checkState();
-return streamPosition;
+return (streamBuffer != null)
+? streamPosition - streamBufferLength + streamBufferPosition
+: streamPosition;
   }
 
   /**
@@ -107,21 +124,39 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
   throw new EOFException(
   FSExceptionMessages.CANNOT_SEEK_PAST_EOF + " " + pos);
 }
-if (pos == getPos()) {
+
+// calculate offset between the target and current position in the stream
+long offset = pos - getPos();
+
+if (offset == 0) {
   // no-op, no state change
   return;
 }
 
+if (offset > 0) {
+  // forward seek, data can be skipped as an optimization
+  if (skip(offset) != offset) {
+throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
+  }
+  return;
+}
+
+// reverse seek, offset is negative
 if (streamBuffer != null) {
-  long offset = streamPosition - pos;
-  if (offset > 0 && offset < streamBufferLength) {
-streamBufferPosition = streamBufferLength - (int) offset;
+  if (streamBufferPosition + offset >= 0) {
+// target position is inside the stream buffer,
+// only need to move backwards within the stream buffer
+streamBufferPosition += offset;
   } else {
-streamBufferPosition = streamBufferLength;
+// target position is outside the stream buffer,
+// need to reset stream buffer and move position for next network read
+resetStreamBuffer();
+streamPosition = pos;
   }
+} else {
+  streamPosition = pos;
 }
 
-streamPosition = pos;
 // close BlobInputStream after seek is invoked because BlobInputStream
 // does not support seek
 closeBlobInputStream();
@@ -189,8 +224,7 @@ final class BlockBlobInputStream extends InputStream 
implements Seekable {
 streamBuffer = new byte[(int) 

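
The heart of the fix is the corrected position arithmetic in getPos(): the logical read position is the network position minus the buffered bytes not yet consumed. A worked example (numbers illustrative):

    public class GetPosExample {
      public static void main(String[] args) {
        long streamPosition = 4 * 1024 * 1024;    // offset of next network read
        int streamBufferLength = 4 * 1024 * 1024; // bytes fetched into the buffer
        int streamBufferPosition = 1024;          // bytes the caller consumed
        long pos = streamPosition - streamBufferLength + streamBufferPosition;
        System.out.println(pos);  // 1024, not 4194304 as reported before the fix
      }
    }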
[03/50] [abbrv] hadoop git commit: HDFS-12224. Add tests to TestJournalNodeSync for sync after JN downtime. Contributed by Hanisha Koneru.

2017-08-11 Thread wangda
HDFS-12224. Add tests to TestJournalNodeSync for sync after JN downtime. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbc6d254
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbc6d254
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbc6d254

Branch: refs/heads/YARN-5881
Commit: bbc6d254c8a953abba69415d80edeede3ee6269d
Parents: fe33417
Author: Arpit Agarwal 
Authored: Fri Aug 4 12:51:33 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Aug 4 12:51:33 2017 -0700

--
 .../hadoop/hdfs/qjournal/server/Journal.java|   3 +-
 .../hdfs/qjournal/server/JournalMetrics.java|  11 +
 .../hdfs/qjournal/server/JournalNodeSyncer.java |   4 +
 .../hdfs/qjournal/TestJournalNodeSync.java  | 265 ---
 .../hdfs/qjournal/server/TestJournalNode.java   |   6 +-
 .../qjournal/server/TestJournalNodeSync.java| 439 +++
 6 files changed, 458 insertions(+), 270 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc6d254/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 0041d5e..0f4091d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -286,8 +286,7 @@ public class Journal implements Closeable {
 fjm.setLastReadableTxId(val);
   }
 
-  @VisibleForTesting
-  JournalMetrics getMetricsForTests() {
+  JournalMetrics getMetrics() {
 return metrics;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc6d254/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
index cffe2c1..fcfd901 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
@@ -45,6 +45,9 @@ class JournalMetrics {
   
   @Metric("Number of batches written where this node was lagging")
   MutableCounterLong batchesWrittenWhileLagging;
+
+  @Metric("Number of edit logs downloaded by JournalNodeSyncer")
+  private MutableCounterLong numEditLogsSynced;
   
   private final int[] QUANTILE_INTERVALS = new int[] {
   1*60, // 1m
@@ -120,4 +123,12 @@ class JournalMetrics {
   q.add(us);
 }
   }
+
+  public MutableCounterLong getNumEditLogsSynced() {
+return numEditLogsSynced;
+  }
+
+  public void incrNumEditLogsSynced() {
+numEditLogsSynced.incr();
+  }
 }
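
The metrics2 idiom the new counter follows, in a self-contained form (record and field names illustrative):

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    @Metrics(about = "Example sync metrics", context = "dfs")
    class SyncMetrics {
      @Metric("Number of edit logs downloaded")
      private MutableCounterLong numEditLogsSynced;

      void onLogSynced() {
        numEditLogsSynced.incr();  // thread-safe increment
      }
    }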

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbc6d254/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index 479f6a0..537ba0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -77,6 +77,7 @@ public class JournalNodeSyncer {
   private final long journalSyncInterval;
   private final int logSegmentTransferTimeout;
   private final DataTransferThrottler throttler;
+  private final JournalMetrics metrics;
 
   JournalNodeSyncer(JournalNode jouranlNode, Journal journal, String jid,
   Configuration conf) {
@@ -93,6 +94,7 @@ public class JournalNodeSyncer {
 DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY,
 DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_DEFAULT);
 throttler = getThrottler(conf);
+metrics = journal.getMetrics();
   }
 
   void stopSync() {
@@ -411,6 +413,8 @@ public class JournalNodeSyncer {
 LOG.warn("Deleting " + tmpEditsFile + " has failed");
   }
   return false;
+} else {
+  metrics.incrNumEditLogsSynced();
 }
 

[05/50] [abbrv] hadoop git commit: HADOOP-14685. Exclude some test jars from hadoop-client-minicluster jar. Contributed by Bharat Viswanadham.

2017-08-11 Thread wangda
HADOOP-14685. Exclude some test jars from hadoop-client-minicluster jar. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/024c3ec4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/024c3ec4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/024c3ec4

Branch: refs/heads/YARN-5881
Commit: 024c3ec4a3ad47cf30501497c7ae810a30634f82
Parents: f44b349
Author: Arpit Agarwal 
Authored: Fri Aug 4 16:46:59 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Aug 4 16:46:59 2017 -0700

--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/024c3ec4/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index f4b2329..5255640 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -634,6 +634,13 @@
                     <exclude>**/*</exclude>
                   </excludes>
                 </filter>
+                <filter>
+                  <artifact>org.apache.hadoop:hadoop-mapreduce-client-jobclient:*</artifact>
+                  <excludes>
+                    <exclude>testjar/*</exclude>
+                    <exclude>testshell/*</exclude>
+                  </excludes>
+                </filter>
               </filters>
             </configuration>






[15/50] [abbrv] hadoop git commit: YARN-6955. Handle concurrent register AM requests in FederationInterceptor. (Botong Huang via Subru).

2017-08-11 Thread wangda
YARN-6955. Handle concurrent register AM requests in FederationInterceptor. 
(Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c61f2c41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c61f2c41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c61f2c41

Branch: refs/heads/YARN-5881
Commit: c61f2c419830e40bb47fb2b1fe1f7d6109ed29a9
Parents: bc20680
Author: Subru Krishnan 
Authored: Mon Aug 7 16:58:29 2017 -0700
Committer: Subru Krishnan 
Committed: Mon Aug 7 16:58:29 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|  4 +-
 .../yarn/server/MockResourceManagerFacade.java  | 18 ++--
 .../amrmproxy/FederationInterceptor.java| 43 --
 .../amrmproxy/TestFederationInterceptor.java| 88 ++--
 4 files changed, 110 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61f2c41/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 034f03c..6825a36 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -594,11 +594,9 @@
 
   
 
-  
   
 
-
-
+
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61f2c41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index 68c55ac..e33d7e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -246,6 +246,16 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
 
     shouldReRegisterNext = false;
 
+    synchronized (applicationContainerIdMap) {
+      if (applicationContainerIdMap.containsKey(amrmToken)) {
+        throw new InvalidApplicationMasterRequestException(
+            AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE);
+      }
+      // Keep track of the containers that are returned to this application
+      applicationContainerIdMap.put(amrmToken, new ArrayList<ContainerId>());
+    }
+
+    // Make sure we wait for certain test cases last in the method
     synchronized (syncObj) {
       syncObj.notifyAll();
       // We reuse the port number to indicate whether the unit test wants us to
@@ -261,14 +271,6 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol,
       }
     }
 
-    synchronized (applicationContainerIdMap) {
-      if (applicationContainerIdMap.containsKey(amrmToken)) {
-        throw new InvalidApplicationMasterRequestException(
-            AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE);
-      }
-      // Keep track of the containers that are returned to this application
-      applicationContainerIdMap.put(amrmToken, new ArrayList<ContainerId>());
-    }
     return RegisterApplicationMasterResponse.newInstance(null, null, null, null,
         null, request.getHost(), null);
   }
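
The reordering is the whole fix: the duplicate-registration check must run before any waiting test threads are notified, otherwise a second concurrent register can be observed as successful. A reduced sketch of the same check-and-put guard, with hypothetical names:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RegistrationGuard {
  private final Map<String, List<String>> registered = new HashMap<>();

  /** Rejects a second registration for the same token, atomically. */
  public void register(String amrmToken) {
    synchronized (registered) {
      if (registered.containsKey(amrmToken)) {
        throw new IllegalStateException(
            "Application Master is already registered: " + amrmToken);
      }
      registered.put(amrmToken, new ArrayList<String>());
    }
  }
}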

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61f2c41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index ffe47f4..28724aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
+++ 

hadoop git commit: HDFS-12281. Ozone: Ozone-default.xml has 3 properties that do not match the default Config value. Contributed by Ajay Yadav.

2017-08-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 bd23dbfbf -> 531d9ced9


HDFS-12281. Ozone: Ozone-default.xml has 3 properties that do not match the 
default Config value. Contributed by Ajay Yadav.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/531d9ced
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/531d9ced
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/531d9ced

Branch: refs/heads/HDFS-7240
Commit: 531d9ced9884ea1ff2512b31a4faf4dc9c2d45cb
Parents: bd23dbf
Author: Anu Engineer 
Authored: Fri Aug 11 10:12:48 2017 -0700
Committer: Anu Engineer 
Committed: Fri Aug 11 10:12:48 2017 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java   | 2 +-
 .../hadoop-hdfs/src/main/resources/ozone-default.xml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/531d9ced/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 92017a0..8a99359 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -68,7 +68,7 @@ public final class OzoneConfigKeys {
   public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
       "RocksDB";
   public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_LEVELDB;
+      OZONE_METADATA_STORE_IMPL_ROCKSDB;
 
   public static final String OZONE_KEY_CACHE = "ozone.key.cache.size";
   public static final int OZONE_KEY_CACHE_DEFAULT = 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/531d9ced/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
index 17a127d..3575d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
@@ -35,7 +35,7 @@
 
   <property>
     <name>ozone.handler.type</name>
-    <value>local</value>
+    <value>distributed</value>
     <description>
       The second key dfs.storage.handler.type tells ozone which storage
       handler to use. The possible values are:
@@ -195,7 +195,7 @@
 
   <property>
     <name>ozone.scm.handler.count.key</name>
-    <value>20</value>
+    <value>10</value>
     <description>
       The number of RPC handler threads for each SCM service endpoint.
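
Drift like this between code constants and ozone-default.xml can be caught mechanically; a rough sketch of the comparison idea, using the key and post-patch value from this commit (the harness itself is illustrative, not part of the patch):

import org.apache.hadoop.conf.Configuration;

public class OzoneDefaultsCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Assumes ozone-default.xml is available on the classpath.
    conf.addResource("ozone-default.xml");
    int fromXml = conf.getInt("ozone.scm.handler.count.key", -1);
    int fromCode = 10; // default in the config keys class after this patch
    if (fromXml != fromCode) {
      System.err.println("ozone-default.xml says " + fromXml
          + " but the code default is " + fromCode);
    }
  }
}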
 





hadoop git commit: HADOOP-10392. Use FileSystem#makeQualified(Path) instead of Path#makeQualified(FileSystem) (ajisakaa via aw)

2017-08-11 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk d964062f6 -> 4222c9710


HADOOP-10392. Use FileSystem#makeQualified(Path) instead of 
Path#makeQualified(FileSystem) (ajisakaa via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4222c971
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4222c971
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4222c971

Branch: refs/heads/trunk
Commit: 4222c971080f2b150713727092c7197df58c88e5
Parents: d964062
Author: Allen Wittenauer 
Authored: Fri Aug 11 09:25:56 2017 -0700
Committer: Allen Wittenauer 
Committed: Fri Aug 11 09:25:56 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java |  4 +--
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |  4 +--
 .../java/org/apache/hadoop/io/SequenceFile.java |  2 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  6 ++---
 .../java/org/apache/hadoop/io/FileBench.java|  2 +-
 .../mapred/MiniMRClientClusterFactory.java  |  4 +--
 .../mapred/TestCombineFileInputFormat.java  |  6 ++---
 .../TestCombineSequenceFileInputFormat.java |  7 +++--
 .../mapred/TestCombineTextInputFormat.java  |  7 +++--
 .../mapred/TestConcatenatedCompressedInput.java |  6 ++---
 .../org/apache/hadoop/mapred/TestMapRed.java|  4 +--
 .../hadoop/mapred/TestMiniMRChildTask.java  |  4 +--
 .../hadoop/mapred/TestTextInputFormat.java  |  8 +++---
 .../TestWrappedRecordReaderClassloader.java |  4 +--
 .../lib/join/TestWrappedRRClassloader.java  |  4 +--
 .../mapreduce/util/MRAsyncDiskService.java  |  2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  4 +--
 .../v2/TestMRJobsWithHistoryService.java|  4 +--
 .../org/apache/hadoop/tools/HadoopArchives.java |  2 +-
 .../apache/hadoop/mapred/gridmix/Gridmix.java   |  2 +-
 .../hadoop/mapred/gridmix/PseudoLocalFs.java|  8 +-
 .../hadoop/mapred/gridmix/TestFilePool.java |  4 +--
 .../hadoop/mapred/gridmix/TestFileQueue.java|  8 +++---
 .../mapred/gridmix/TestPseudoLocalFs.java   |  2 +-
 .../hadoop/mapred/gridmix/TestUserResolve.java  |  4 +--
 .../hadoop/fs/swift/util/SwiftTestUtils.java|  2 +-
 .../fs/swift/SwiftFileSystemBaseTest.java   |  2 +-
 .../TestSwiftFileSystemPartitionedUploads.java  |  4 +--
 .../hadoop/tools/rumen/TestHistograms.java  |  6 ++---
 .../org/apache/hadoop/streaming/StreamJob.java  | 27 ++--
 30 files changed, 78 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4222c971/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index eb8a5c3..72b9615 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -295,8 +295,8 @@ public class FileUtil {
                              Path dst)
                              throws IOException {
     if (srcFS == dstFS) {
-      String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
-      String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
+      String srcq = srcFS.makeQualified(src).toString() + Path.SEPARATOR;
+      String dstq = dstFS.makeQualified(dst).toString() + Path.SEPARATOR;
       if (dstq.startsWith(srcq)) {
         if (srcq.length() == dstq.length()) {
           throw new IOException("Cannot copy " + src + " to itself.");
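
Every call site in the long list above changes the same way; a minimal before/after sketch against the local filesystem (both forms return the same qualified path, the Path-side method is simply the deprecated direction):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path("data/part-0");
    Path oldStyle = p.makeQualified(fs);   // deprecated direction
    Path newStyle = fs.makeQualified(p);   // preferred after this change
    System.out.println(oldStyle.equals(newStyle)); // true
  }
}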

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4222c971/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 4c1236b..644cf4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -505,7 +505,7 @@ public class FTPFileSystem extends FileSystem {
       long modTime = -1; // Modification time of root dir not known.
       Path root = new Path("/");
       return new FileStatus(length, isDir, blockReplication, blockSize,
-          modTime, root.makeQualified(this));
+          modTime, this.makeQualified(root));
     }
     String pathName = 

hadoop git commit: HDFS-12028. Reapply: Ozone: CLI: remove noisy slf4j binding output from hdfs oz command. Contributed by Chen Liang.

2017-08-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 4fac755bc -> bd23dbfbf


HDFS-12028. Reapply: Ozone: CLI: remove noisy slf4j binding output from hdfs oz 
command. Contributed by Chen Liang.

While committing HDFS-12115, I accidentally removed this patch. Reapplying the 
same patch again.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd23dbfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd23dbfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd23dbfb

Branch: refs/heads/HDFS-7240
Commit: bd23dbfbf740de68b1f7c912f5ec3bf5f6d19a9a
Parents: 4fac755
Author: Anu Engineer 
Authored: Fri Aug 11 09:10:34 2017 -0700
Committer: Anu Engineer 
Committed: Fri Aug 11 09:10:34 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd23dbfb/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 101cdaa..b8364d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -213,6 +213,12 @@
       <artifactId>target</artifactId>
       <version>2.5.3</version>
       <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>ch.qos.logback</groupId>
+          <artifactId>logback-classic</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.jctools</groupId>





hadoop git commit: HDFS-12286. Ozone: Extend MBeans utility to add any key value pairs to the registered MXBeans. Contributed by Elek, Marton.

2017-08-11 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 482c09462 -> 4fac755bc


HDFS-12286. Ozone: Extend MBeans utility to add any key value pairs to the 
registered MXBeans. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fac755b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fac755b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fac755b

Branch: refs/heads/HDFS-7240
Commit: 4fac755bc765291197c3ead2846f4f4fd7332100
Parents: 482c094
Author: Anu Engineer 
Authored: Fri Aug 11 08:32:23 2017 -0700
Committer: Anu Engineer 
Committed: Fri Aug 11 08:32:23 2017 -0700

--
 .../org/apache/hadoop/metrics2/util/MBeans.java |  35 ++-
 .../hadoop/metrics2/util/DummyMXBean.java   |  23 +
 .../apache/hadoop/metrics2/util/TestMBeans.java | 101 +++
 3 files changed, 156 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fac755b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
index ded49d6..4c75160 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
@@ -18,13 +18,17 @@
 package org.apache.hadoop.metrics2.util;
 
 import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 import javax.management.InstanceAlreadyExistsException;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -60,8 +64,25 @@ public class MBeans {
    */
   static public ObjectName register(String serviceName, String nameName,
                                     Object theMbean) {
+    return register(serviceName, nameName, new HashMap<String, String>(),
+        theMbean);
+  }
+
+  /**
+   * Register the MBean using our standard MBeanName format
+   * "hadoop:service=<serviceName>,name=<nameName>"
+   * Where the <serviceName> and <nameName> are the supplied parameters.
+   *
+   * @param serviceName
+   * @param nameName
+   * @param properties - Key value pairs to define additional JMX ObjectName properties.
+   * @param theMbean - the MBean to register
+   * @return the name used to register the MBean
+   */
+  static public ObjectName register(String serviceName, String nameName,
+                                    Map<String, String> properties,
+                                    Object theMbean) {
     final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-    ObjectName name = getMBeanName(serviceName, nameName);
+    ObjectName name = getMBeanName(serviceName, nameName, properties);
     if (name != null) {
       try {
         mbs.registerMBean(theMbean, name);
@@ -116,9 +137,17 @@ public class MBeans {
     DefaultMetricsSystem.removeMBeanName(mbeanName);
   }
 
-  static private ObjectName getMBeanName(String serviceName, String nameName) {
+  @VisibleForTesting
+  static ObjectName getMBeanName(String serviceName, String nameName,
+                                 Map<String, String> additionalParameters) {
+
+    String additionalKeys = additionalParameters.entrySet()
+        .stream()
+        .map(entry -> entry.getKey() + "=" + entry.getValue())
+        .collect(Collectors.joining(","));
+
     String nameStr = DOMAIN_PREFIX + SERVICE_PREFIX + serviceName + "," +
-        NAME_PREFIX + nameName;
+        NAME_PREFIX + nameName +
+        (additionalKeys.isEmpty() ? "" : "," + additionalKeys);
     try {
       return DefaultMetricsSystem.newMBeanName(nameStr);
     } catch (Exception e) {
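
A usage sketch of the new overload; the service name, bean name, and property pair below are illustrative only, not taken from the patch:

import java.util.HashMap;
import java.util.Map;
import javax.management.ObjectName;
import org.apache.hadoop.metrics2.util.MBeans;

public class MBeansRegisterDemo {
  public interface DemoInfoMXBean {
    int getCount();
  }

  public static class DemoInfo implements DemoInfoMXBean {
    @Override
    public int getCount() {
      return 42;
    }
  }

  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("volume", "vol1"); // becomes an extra key=value in the ObjectName
    // Expected to register as
    // "Hadoop:service=OzoneDemo,name=DemoInfo,volume=vol1".
    ObjectName name = MBeans.register("OzoneDemo", "DemoInfo", props,
        new DemoInfo());
    System.out.println("Registered: " + name);
  }
}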

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fac755b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/DummyMXBean.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/DummyMXBean.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/DummyMXBean.java
new file mode 100644
index 000..4cf14a3
--- /dev/null
+++ 

hadoop git commit: YARN-6133. [ATSv2 Security] Renew delegation token for app automatically if an app collector is active. Contributed by Varun Saxena.

2017-08-11 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355_branch2 798069390 -> 938a68f0d


YARN-6133. [ATSv2 Security] Renew delegation token for app automatically if an 
app collector is active. Contributed by Varun Saxena.

(cherry picked from commit ffb1f572b68e73efd6410ab74a334f4e5df543f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/938a68f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/938a68f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/938a68f0

Branch: refs/heads/YARN-5355_branch2
Commit: 938a68f0d6faf2c1d0524349816d397fd8e9935b
Parents: 7980693
Author: Rohith Sharma K S 
Authored: Thu Aug 10 11:12:57 2017 +0530
Committer: Rohith Sharma K S 
Committed: Fri Aug 11 14:17:34 2017 +0530

--
 .../security/TestTimelineAuthFilterForV2.java   | 27 +-
 .../collector/AppLevelTimelineCollector.java| 17 +++-
 .../collector/NodeTimelineCollectorManager.java | 88 +++-
 .../collector/TimelineCollector.java|  7 ++
 .../collector/TimelineCollectorManager.java |  8 +-
 ...neV2DelegationTokenSecretManagerService.java |  6 ++
 6 files changed, 139 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/938a68f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index 84d892d..78c5c66 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
@@ -183,6 +184,13 @@ public class TestTimelineAuthFilterForV2 {
       conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
           HttpConfig.Policy.HTTP_ONLY.name());
     }
+    if (!withKerberosLogin) {
+      // For timeline delegation token based access, set the delegation token
+      // renew interval to 100 ms to test whether the timeline delegation
+      // token for the app is renewed automatically while the app is alive.
+      conf.setLong(
+          YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL, 100);
+    }
     UserGroupInformation.setConfiguration(conf);
     collectorManager = new DummyNodeTimelineCollectorManager();
     auxService = PerNodeTimelineCollectorsAuxService.launchServer(
@@ -282,12 +290,12 @@ public class TestTimelineAuthFilterForV2 {
   }
 
   private void publishAndVerifyEntity(ApplicationId appId, File entityTypeDir,
-      String entityType) throws Exception {
+      String entityType, int numEntities) throws Exception {
     TimelineV2Client client = createTimelineClientForUGI(appId);
     try {
       // Sync call. Results available immediately.
       client.putEntities(createEntity("entity1", entityType));
-      assertEquals(1, entityTypeDir.listFiles().length);
+      assertEquals(numEntities, entityTypeDir.listFiles().length);
       verifyEntity(entityTypeDir, "entity1", entityType);
       // Async call.
       client.putEntitiesAsync(createEntity("entity2", entityType));
@@ -312,12 +320,22 @@ public class TestTimelineAuthFilterForV2 {
       KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
         @Override
         public Void call() throws Exception {
-          publishAndVerifyEntity(appId, entityTypeDir, entityType);
+          publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
           return null;
         }
       });
     } else {
-      publishAndVerifyEntity(appId, entityTypeDir, entityType);
+      publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
+      // Verify if token is renewed automatically and entities can still be
+      // published.
+      Thread.sleep(1000);
+

hadoop git commit: HDFS-12196. Ozone: DeleteKey-2: Implement block deleting service to delete stale blocks at background. Contributed by Weiwei Yang.

2017-08-11 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 0e32bf179 -> 482c09462


HDFS-12196. Ozone: DeleteKey-2: Implement block deleting service to delete 
stale blocks at background. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/482c0946
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/482c0946
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/482c0946

Branch: refs/heads/HDFS-7240
Commit: 482c0946221a56e8020fb6ec1a1ff27ceea9ff00
Parents: 0e32bf1
Author: Weiwei Yang 
Authored: Fri Aug 11 18:45:55 2017 +0800
Committer: Weiwei Yang 
Committed: Fri Aug 11 18:45:55 2017 +0800

--
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  18 ++
 .../common/impl/ContainerManagerImpl.java   |   2 +-
 .../background/BlockDeletingService.java| 211 +
 .../statemachine/background/package-info.java   |  18 ++
 .../container/ozoneimpl/OzoneContainer.java |  14 +
 .../apache/hadoop/utils/BackgroundService.java  | 147 +
 .../org/apache/hadoop/utils/BackgroundTask.java |  28 ++
 .../hadoop/utils/BackgroundTaskQueue.java   |  64 
 .../hadoop/utils/BackgroundTaskResult.java  |  29 ++
 .../src/main/resources/ozone-default.xml|  31 ++
 .../TestUtils/BlockDeletingServiceTestImpl.java |  99 ++
 .../common/TestBlockDeletingService.java| 312 +++
 12 files changed, 972 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/482c0946/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 55b5f88..92017a0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -94,6 +94,24 @@ public final class OzoneConfigKeys {
       "ozone.client.connection.timeout.ms";
   public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT = 5000;
 
+  /**
+   * Configuration properties for Ozone Block Deleting Service.
+   */
+  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS =
+      "ozone.block.deleting.service.interval.ms";
+  public static final int OZONE_BLOCK_DELETING_SERVICE_INTERVAL_MS_DEFAULT
+      = 60000;
+
+  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
+      "ozone.block.deleting.limit.per.task";
+  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
+      = 1000;
+
+  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
+      = "ozone.block.deleting.container.limit.per.interval";
+  public static final int
+      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
+
   public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
       = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
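
A sketch of tuning these knobs from client code; the key strings come from the constants above, while the numeric values are examples rather than recommendations:

import org.apache.hadoop.conf.Configuration;

public class BlockDeletingTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // How often the background deleting service wakes up.
    conf.setInt("ozone.block.deleting.service.interval.ms", 60000);
    // Upper bound on blocks deleted per container in one task run.
    conf.setInt("ozone.block.deleting.limit.per.task", 1000);
    // Upper bound on containers scanned per interval.
    conf.setInt("ozone.block.deleting.container.limit.per.interval", 10);
    System.out.println(
        conf.getInt("ozone.block.deleting.service.interval.ms", -1));
  }
}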

http://git-wip-us.apache.org/repos/asf/hadoop/blob/482c0946/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index b77ac55..aa6946c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -658,7 +658,7 @@ public class ContainerManagerImpl implements ContainerManager {
 
 
   @VisibleForTesting
-  ConcurrentSkipListMap getContainerMap() {
+  public ConcurrentSkipListMap getContainerMap() {
     return containerMap;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/482c0946/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
 

[1/2] hadoop git commit: HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb properties missing in classes. Contributed by John Zhuge.

2017-08-11 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 304cc1a85 -> 9c6e8ece9
  refs/heads/trunk f13ca9495 -> d964062f6


HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb 
properties missing in classes.
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d964062f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d964062f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d964062f

Branch: refs/heads/trunk
Commit: d964062f66c0772f4b1a029bfcdff921fbaaf91c
Parents: f13ca94
Author: Steve Loughran 
Authored: Fri Aug 11 10:18:17 2017 +0100
Committer: Steve Loughran 
Committed: Fri Aug 11 10:18:17 2017 +0100

--
 .../org/apache/hadoop/conf/TestCommonConfigurationFields.java  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d964062f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index da37e68..d0e0a35 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -103,6 +103,12 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     xmlPrefixToSkipCompare.add("fs.s3n.");
     xmlPrefixToSkipCompare.add("s3native.");
 
+    // WASB properties are in a different subtree.
+    // - org.apache.hadoop.fs.azure.NativeAzureFileSystem
+    xmlPrefixToSkipCompare.add("fs.wasb.impl");
+    xmlPrefixToSkipCompare.add("fs.wasbs.impl");
+    xmlPrefixToSkipCompare.add("fs.azure.");
+
     // ADL properties are in a different subtree
     // - org.apache.hadoop.hdfs.web.ADLConfKeys
     xmlPrefixToSkipCompare.add("adl.");





[2/2] hadoop git commit: HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb properties missing in classes. Contributed by John Zhuge.

2017-08-11 Thread stevel
HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb 
properties missing in classes.
Contributed by John Zhuge.

(cherry picked from commit d964062f66c0772f4b1a029bfcdff921fbaaf91c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c6e8ece
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c6e8ece
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c6e8ece

Branch: refs/heads/branch-2
Commit: 9c6e8ece981cef182059256bc91dbfa1129ddcb5
Parents: 304cc1a
Author: Steve Loughran 
Authored: Fri Aug 11 10:58:26 2017 +0100
Committer: Steve Loughran 
Committed: Fri Aug 11 10:58:26 2017 +0100

--
 .../hadoop/conf/TestCommonConfigurationFields.java  | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c6e8ece/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 228ee89..35c056c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -105,6 +105,18 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     xmlPrefixToSkipCompare.add("s3.");
     xmlPrefixToSkipCompare.add("s3native.");
 
+    // WASB properties are in a different subtree.
+    // - org.apache.hadoop.fs.azure.NativeAzureFileSystem
+    xmlPrefixToSkipCompare.add("fs.wasb.impl");
+    xmlPrefixToSkipCompare.add("fs.wasbs.impl");
+    xmlPrefixToSkipCompare.add("fs.azure.");
+
+    // ADL properties are in a different subtree
+    // - org.apache.hadoop.hdfs.web.ADLConfKeys
+    xmlPrefixToSkipCompare.add("adl.");
+    xmlPrefixToSkipCompare.add("fs.adl.");
+    xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl");
+
     // Azure properties are in a different class
     // - org.apache.hadoop.fs.azure.AzureNativeFileSystemStore
     // - org.apache.hadoop.fs.azure.SASKeyGeneratorImpl





[1/2] hadoop git commit: YARN-6130. [ATSv2 Security] Generate a delegation token for AM when app collector is created and pass it to AM via NM and RM. Contributed by Varun Saxena.

2017-08-11 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355_branch2 e2ffa0f51 -> 798069390


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79806939/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 3234d6f..f826631 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -239,6 +240,11 @@ public abstract class MockAsm extends MockApps {
     public boolean isAppInCompletedStates() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
+
+    @Override
+    public CollectorInfo getCollectorInfo() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
   }
 
   public static RMApp newApplication(int i) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79806939/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index 9365e54..17cafef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -321,13 +322,13 @@ public class MockRMApp implements RMApp {
     return false;
   }
 
-  public String getCollectorAddr() {
+  @Override
+  public AppCollectorData getCollectorData() {
     throw new UnsupportedOperationException("Not supported yet.");
   }
 
   @Override
-  public AppCollectorData getCollectorData() {
+  public CollectorInfo getCollectorInfo() {
     throw new UnsupportedOperationException("Not supported yet.");
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79806939/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
index 07058f6..eb4381d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
+++ 

[2/2] hadoop git commit: YARN-6130. [ATSv2 Security] Generate a delegation token for AM when app collector is created and pass it to AM via NM and RM. Contributed by Varun Saxena.

2017-08-11 Thread rohithsharmaks
YARN-6130. [ATSv2 Security] Generate a delegation token for AM when app 
collector is created and pass it to AM via NM and RM. Contributed by Varun 
Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79806939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79806939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79806939

Branch: refs/heads/YARN-5355_branch2
Commit: 798069390739ab6971ca038aad4cd0adc4b9855a
Parents: e2ffa0f
Author: Rohith Sharma K S 
Authored: Fri Aug 11 12:35:35 2017 +0530
Committer: Rohith Sharma K S 
Committed: Fri Aug 11 12:35:35 2017 +0530

--
 .../v2/app/rm/RMContainerAllocator.java |   9 +-
 .../app/local/TestLocalContainerAllocator.java  |   2 +-
 .../api/protocolrecords/AllocateResponse.java   |  92 +---
 .../hadoop/yarn/api/records/CollectorInfo.java  |  55 +++
 .../src/main/proto/yarn_protos.proto|   5 +
 .../src/main/proto/yarn_service_protos.proto|   2 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   6 +-
 .../ApplicationMasterServiceProtoTestBase.java  |  72 +
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  22 ++-
 ...ationMasterServiceProtocolForTimelineV2.java |  71 +
 ...estApplicationMasterServiceProtocolOnHA.java |  46 +-
 .../api/async/impl/TestAMRMClientAsync.java |   4 +-
 .../impl/pb/AllocateResponsePBImpl.java |  37 -
 .../records/impl/pb/CollectorInfoPBImpl.java| 148 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |   2 +
 .../ReportNewCollectorInfoRequest.java  |   5 +-
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  25 +++-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  21 ++-
 .../pb/ReportNewCollectorInfoRequestPBImpl.java |   4 +-
 .../server/api/records/AppCollectorData.java|  27 +++-
 .../records/impl/pb/AppCollectorDataPBImpl.java |  29 +++-
 .../yarn_server_common_service_protos.proto |   2 +
 .../java/org/apache/hadoop/yarn/TestRPC.java|  30 +++-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   4 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |   1 -
 .../application/ApplicationImpl.java|   2 +-
 .../amrmproxy/MockResourceManagerFacade.java|   2 +-
 .../ApplicationMasterService.java   |  10 +-
 .../server/resourcemanager/rmapp/RMApp.java |  15 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  10 +-
 .../applicationsmanager/MockAsm.java|   6 +
 .../server/resourcemanager/rmapp/MockRMApp.java |   7 +-
 .../TestTimelineServiceClientIntegration.java   |   2 +-
 .../security/TestTimelineAuthFilterForV2.java   | 121 +++
 .../collector/AppLevelTimelineCollector.java|  24 +++
 .../AppLevelTimelineCollectorWithAgg.java   |   4 +-
 .../collector/NodeTimelineCollectorManager.java |  83 +--
 .../PerNodeTimelineCollectorsAuxService.java|   7 +-
 ...neV2DelegationTokenSecretManagerService.java |  31 
 .../TestNMTimelineCollectorManager.java |   4 +-
 40 files changed, 887 insertions(+), 162 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79806939/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 218e218..d681940 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -860,13 +860,16 @@ public class RMContainerAllocator extends RMContainerRequestor
     handleUpdatedNodes(response);
     handleJobPriorityChange(response);
     // handle receiving the timeline collector address for this app
-    String collectorAddr = response.getCollectorAddr();
+    String collectorAddr = null;
+    if (response.getCollectorInfo() != null) {
+      collectorAddr = response.getCollectorInfo().getCollectorAddr();
+    }
+
     MRAppMaster.RunningAppContext appContext =
         (MRAppMaster.RunningAppContext)this.getContext();
     if (collectorAddr != null && !collectorAddr.isEmpty()
         && appContext.getTimelineV2Client() != null) {
-  

hadoop git commit: HDFS-12287. Remove a no-longer applicable TODO comment in DatanodeManager. Contributed by Chen Liang.

2017-08-11 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk a32e0138f -> f13ca9495


HDFS-12287. Remove a no-longer applicable TODO comment in DatanodeManager. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f13ca949
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f13ca949
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f13ca949

Branch: refs/heads/trunk
Commit: f13ca94954072c9b898b142a5ff86f2c1f3ee55a
Parents: a32e013
Author: Yiqun Lin 
Authored: Fri Aug 11 14:13:45 2017 +0800
Committer: Yiqun Lin 
Committed: Fri Aug 11 14:13:45 2017 +0800

--
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f13ca949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index d705fec..78783ca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -212,8 +212,6 @@ public class DatanodeManager {
 this.namesystem = namesystem;
 this.blockManager = blockManager;
 
-// TODO: Enables DFSNetworkTopology by default after more stress
-// testings/validations.
 this.useDfsNetworkTopology = conf.getBoolean(
 DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY,
 DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT);

