hadoop git commit: YARN-8827. Plumb aggregated application resource utilization from the NM to RM. (asuresh)

2018-10-09 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 bb5991423 -> 608f00998


YARN-8827. Plumb aggregated application resource utilization from the NM to RM. 
(asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608f0099
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608f0099
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608f0099

Branch: refs/heads/YARN-1011
Commit: 608f009980b779857b3660a3bd6c70ee44738b8f
Parents: bb59914
Author: Arun Suresh 
Authored: Tue Oct 9 21:09:50 2018 -0700
Committer: Arun Suresh 
Committed: Tue Oct 9 21:09:50 2018 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   6 +
 .../yarn/api/records/ResourceUtilization.java   |  24 ++
 .../yarn/server/api/records/NodeStatus.java |  19 +
 .../api/records/impl/pb/NodeStatusPBImpl.java   |  59 +++
 .../main/proto/yarn_server_common_protos.proto  |   6 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |   9 +
 .../monitor/ContainersMonitor.java  |  33 ++
 .../monitor/ContainersMonitorImpl.java  |  53 ++-
 .../server/resourcemanager/ResourceManager.java |   8 +
 .../ResourceUtilizationAggregator.java  | 178 +
 .../server/resourcemanager/rmnode/RMNode.java   |   7 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  26 +-
 .../rmnode/RMNodeStatusEvent.java   |   7 +-
 .../yarn/server/resourcemanager/MockNM.java |  26 +-
 .../yarn/server/resourcemanager/MockNodes.java  |  16 +
 .../TestResourceUtilizationAggregator.java  | 357 +++
 17 files changed, 820 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608f0099/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 350f4a3..716a1d8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,12 @@ public class NodeInfo {
 }
 
 @Override
+public Map
+getAggregatedAppUtilizations() {
+  return null;
+}
+
+@Override
 public ResourceUtilization getNodeUtilization() {
   return null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608f0099/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index bb6fb9d..0e2a84e 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -225,6 +225,12 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
+  public Map
+  getAggregatedAppUtilizations() {
+return node.getAggregatedAppUtilizations();
+  }
+
+  @Override
   public Resource getPhysicalResource() {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608f0099/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
index 2ae4872..c340093 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
@@ -53,6 +53,16 @@ public abstract class ResourceUtilization implements
   }
 
   /**
+   * Helper function to return a zero-ed out Utilization.
+   * @return New Resource Utilization.
+   */
+  @Public
+  @Unstable
+  public static ResourceUtilization newZero() {
+return newInstance(0, 0, 0.0f);
+  }
+
+  /**
* Get used virtual memory.
*
* @return virtual memory in MB
@@ -157,6 +167,20 @@ public abstract class ResourceUtilization implements
   }
 
   /**
+   * Add 

hadoop git commit: HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect with striped reads. Contributed by Xiao Chen, Hrishikesh Gadre.

2018-10-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 af85ce6ae -> b170de8be


HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect 
with striped reads.
Contributed by Xiao Chen, Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 323b76bccfa153ef5ba52dc14876283d05618739)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b170de8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b170de8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b170de8b

Branch: refs/heads/branch-3.0
Commit: b170de8be5234015794d1e9eb09f5e69c7a2af25
Parents: af85ce6
Author: Hrishikesh Gadre 
Authored: Tue Oct 9 16:42:22 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 9 19:54:56 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 20 ++
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  |  7 ---
 .../org/apache/hadoop/hdfs/StripeReader.java| 23 ---
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  | 10 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 65 ++--
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../erasurecode/StripedBlockReader.java | 14 +++--
 .../datanode/erasurecode/StripedReader.java | 17 ++---
 .../erasurecode/StripedReconstructor.java   |  3 +-
 .../TestDistributedFileSystemWithECFile.java| 38 
 11 files changed, 167 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b170de8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index ae24572..827db47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -89,6 +89,8 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /
  * DFSInputStream provides bytes from a named file.  It handles
  * negotiation of the namenode and various datanodes as necessary.
@@ -768,6 +770,9 @@ public class DFSInputStream extends FSInputStream
 // got a EOS from reader though we expect more data on it.
 throw new IOException("Unexpected EOS from the reader");
   }
+  updateReadStatistics(readStatistics, result, blockReader);
+  dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
+  result);
   return result;
 } catch (ChecksumException ce) {
   throw ce;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b170de8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 190ba8e..9ec3e0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -53,6 +53,8 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /**
  * DFSStripedInputStream reads from striped block groups.
  */
@@ -327,6 +329,24 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
+   * Update read statistics. Note that this has to be done on the thread that
+   * initiates the read, rather than inside each async thread, for
+   * {@link org.apache.hadoop.fs.FileSystem.Statistics} to work correctly with
+   * its ThreadLocal.
+   *
+   * @param stats striped read stats
+   */
+  void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
+if (stats == null) {
+  return;
+}
+updateReadStatistics(readStatistics, stats.getBytesRead(),
+stats.isShortCircuit(), stats.getNetworkDistance());
+dfsClient.updateFileSystemReadStats(stats.getNetworkDistance(),
+stats.getBytesRead());
+  }
+
+  

hadoop git commit: HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect with striped reads. Contributed by Xiao Chen, Hrishikesh Gadre.

2018-10-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3968ce107 -> 323b76bcc


HDFS-13926. ThreadLocal aggregations for FileSystem.Statistics are incorrect 
with striped reads.
Contributed by Xiao Chen, Hrishikesh Gadre.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/323b76bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/323b76bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/323b76bc

Branch: refs/heads/branch-3.1
Commit: 323b76bccfa153ef5ba52dc14876283d05618739
Parents: 3968ce1
Author: Hrishikesh Gadre 
Authored: Tue Oct 9 16:42:22 2018 -0700
Committer: Xiao Chen 
Committed: Tue Oct 9 19:54:34 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 ++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 20 ++
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  |  7 ---
 .../org/apache/hadoop/hdfs/StripeReader.java| 23 ---
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  | 10 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 65 ++--
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../erasurecode/StripedBlockReader.java | 14 +++--
 .../datanode/erasurecode/StripedReader.java | 17 ++---
 .../erasurecode/StripedReconstructor.java   |  3 +-
 .../TestDistributedFileSystemWithECFile.java| 38 
 11 files changed, 167 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/323b76bc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 75eb2ea..98c2c9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -89,6 +89,8 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /
  * DFSInputStream provides bytes from a named file.  It handles
  * negotiation of the namenode and various datanodes as necessary.
@@ -768,6 +770,9 @@ public class DFSInputStream extends FSInputStream
 // got a EOS from reader though we expect more data on it.
 throw new IOException("Unexpected EOS from the reader");
   }
+  updateReadStatistics(readStatistics, result, blockReader);
+  dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
+  result);
   return result;
 } catch (ChecksumException ce) {
   throw ce;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/323b76bc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 190ba8e..9ec3e0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -53,6 +53,8 @@ import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 
+import static org.apache.hadoop.hdfs.util.IOUtilsClient.updateReadStatistics;
+
 /**
  * DFSStripedInputStream reads from striped block groups.
  */
@@ -327,6 +329,24 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
+   * Update read statistics. Note that this has to be done on the thread that
+   * initiates the read, rather than inside each async thread, for
+   * {@link org.apache.hadoop.fs.FileSystem.Statistics} to work correctly with
+   * its ThreadLocal.
+   *
+   * @param stats striped read stats
+   */
+  void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
+if (stats == null) {
+  return;
+}
+updateReadStatistics(readStatistics, stats.getBytesRead(),
+stats.isShortCircuit(), stats.getNetworkDistance());
+dfsClient.updateFileSystemReadStats(stats.getNetworkDistance(),
+stats.getBytesRead());
+  }
+
+  /**
* Seek to a new arbitrary location.
*/
   @Override


[2/2] hadoop git commit: YARN-8858. CapacityScheduler should respect maximum node resource when per-queue maximum-allocation is being used. Contributed by Wangda Tan.

2018-10-09 Thread wwei
YARN-8858. CapacityScheduler should respect maximum node resource when 
per-queue maximum-allocation is being used. Contributed by Wangda Tan.

(cherry picked from commit edce866489d83744f3f47a3b884b0c6136885e4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc1bf7f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc1bf7f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc1bf7f8

Branch: refs/heads/branch-2
Commit: cc1bf7f8948ba10151ec0dc80ce6131de38b5a45
Parents: d412785
Author: Weiwei Yang 
Authored: Wed Oct 10 09:32:17 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 10:17:18 2018 +0800

--
 .../scheduler/ClusterNodeTracker.java   | 11 +
 .../scheduler/capacity/CapacityScheduler.java   | 12 -
 .../capacity/TestContainerAllocation.java   | 52 
 3 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1bf7f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 010e645..b23b2be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -228,6 +229,16 @@ public class ClusterNodeTracker {
 }
   }
 
+  @VisibleForTesting
+  public void setForceConfiguredMaxAllocation(boolean flag) {
+writeLock.lock();
+try {
+  forceConfiguredMaxAllocation = flag;
+} finally {
+  writeLock.unlock();
+}
+  }
+
   private void updateMaxResources(SchedulerNode node, boolean add) {
 Resource totalResource = node.getTotalResource();
 writeLock.lock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1bf7f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index aaae86d..207e059 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2192,7 +2192,17 @@ public class CapacityScheduler extends
   LOG.error("queue " + queueName + " is not an leaf queue");
   return getMaximumResourceCapability();
 }
-return ((LeafQueue)queue).getMaximumAllocation();
+
+// queue.getMaxAllocation returns *configured* maximum allocation.
+// getMaximumResourceCapability() returns maximum allocation considers
+// per-node maximum resources. So return (component-wise) min of the two.
+
+Resource queueMaxAllocation = ((LeafQueue)queue).getMaximumAllocation();
+Resource clusterMaxAllocationConsiderNodeMax =
+getMaximumResourceCapability();
+
+return Resources.componentwiseMin(queueMaxAllocation,
+clusterMaxAllocationConsiderNodeMax);
   }
 
   private String handleMoveToPlanQueue(String targetQueueName) {


[1/2] hadoop git commit: YARN-8720. CapacityScheduler does not enforce max resource allocation check at queue level. Contributed by Tarun Parimi.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b0900ad31 -> cc1bf7f89


YARN-8720. CapacityScheduler does not enforce max resource allocation check at 
queue level. Contributed by Tarun Parimi.

(cherry picked from commit f1a893fdbc2dbe949cae786f08bdb2651b88d673)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4127851
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4127851
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4127851

Branch: refs/heads/branch-2
Commit: d412785175a89b584bdcbffbe47175a8a9d771c4
Parents: b0900ad
Author: Weiwei Yang 
Authored: Fri Sep 14 16:33:51 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 10:16:16 2018 +0800

--
 .../hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java | 3 ++-
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java | 5 +++--
 .../hadoop/yarn/server/resourcemanager/TestAppManager.java  | 3 +++
 .../hadoop/yarn/server/resourcemanager/TestClientRMService.java | 3 +++
 4 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4127851/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index eea42e7..273e0cd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -207,7 +207,8 @@ final class DefaultAMSProcessor implements 
ApplicationMasterServiceProcessor {
   }
 }
 
-Resource maximumCapacity = getScheduler().getMaximumResourceCapability();
+Resource maximumCapacity =
+getScheduler().getMaximumResourceCapability(app.getQueue());
 
 // sanity check
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4127851/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 3b47db2..2ffdbc3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -498,10 +498,11 @@ public class RMAppManager implements 
EventHandler,
 }
 
 // Normalize all requests
+String queue = submissionContext.getQueue();
 for (ResourceRequest amReq : amReqs) {
   SchedulerUtils.normalizeAndValidateRequest(amReq,
-  scheduler.getMaximumResourceCapability(),
-  submissionContext.getQueue(), scheduler, isRecovery, rmContext);
+  scheduler.getMaximumResourceCapability(queue),
+  queue, scheduler, isRecovery, rmContext);
 
   amReq.setCapability(
   scheduler.getNormalizedResource(amReq.getCapability()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4127851/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index adf6bbe..0623cb0 100644
--- 

hadoop git commit: YARN-8858. CapacityScheduler should respect maximum node resource when per-queue maximum-allocation is being used. Contributed by Wangda Tan.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 590a4e9d1 -> e75191a62


YARN-8858. CapacityScheduler should respect maximum node resource when 
per-queue maximum-allocation is being used. Contributed by Wangda Tan.

(cherry picked from commit edce866489d83744f3f47a3b884b0c6136885e4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e75191a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e75191a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e75191a6

Branch: refs/heads/branch-2.9
Commit: e75191a62e29f4c39de67fbfe69841290d2c3b34
Parents: 590a4e9
Author: Weiwei Yang 
Authored: Wed Oct 10 09:32:17 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 10:04:29 2018 +0800

--
 .../scheduler/ClusterNodeTracker.java   | 11 +
 .../scheduler/capacity/CapacityScheduler.java   | 12 -
 .../capacity/TestContainerAllocation.java   | 52 
 3 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e75191a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 010e645..b23b2be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -228,6 +229,16 @@ public class ClusterNodeTracker {
 }
   }
 
+  @VisibleForTesting
+  public void setForceConfiguredMaxAllocation(boolean flag) {
+writeLock.lock();
+try {
+  forceConfiguredMaxAllocation = flag;
+} finally {
+  writeLock.unlock();
+}
+  }
+
   private void updateMaxResources(SchedulerNode node, boolean add) {
 Resource totalResource = node.getTotalResource();
 writeLock.lock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e75191a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 65f2cba..fae9199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2199,7 +2199,17 @@ public class CapacityScheduler extends
   LOG.error("queue " + queueName + " is not an leaf queue");
   return getMaximumResourceCapability();
 }
-return ((LeafQueue)queue).getMaximumAllocation();
+
+// queue.getMaxAllocation returns *configured* maximum allocation.
+// getMaximumResourceCapability() returns maximum allocation considers
+// per-node maximum resources. So return (component-wise) min of the two.
+
+Resource queueMaxAllocation = ((LeafQueue)queue).getMaximumAllocation();
+Resource clusterMaxAllocationConsiderNodeMax =
+getMaximumResourceCapability();
+
+return Resources.componentwiseMin(queueMaxAllocation,
+clusterMaxAllocationConsiderNodeMax);
   }
 
   private String handleMoveToPlanQueue(String targetQueueName) {


hadoop git commit: YARN-8858. CapacityScheduler should respect maximum node resource when per-queue maximum-allocation is being used. Contributed by Wangda Tan.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 8a9f61be4 -> af85ce6ae


YARN-8858. CapacityScheduler should respect maximum node resource when 
per-queue maximum-allocation is being used. Contributed by Wangda Tan.

(cherry picked from commit edce866489d83744f3f47a3b884b0c6136885e4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af85ce6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af85ce6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af85ce6a

Branch: refs/heads/branch-3.0
Commit: af85ce6ae44966a19f7c91793f77136750ca0597
Parents: 8a9f61b
Author: Weiwei Yang 
Authored: Wed Oct 10 09:32:17 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 09:56:16 2018 +0800

--
 .../scheduler/ClusterNodeTracker.java   | 11 +
 .../scheduler/capacity/CapacityScheduler.java   | 12 -
 .../capacity/TestContainerAllocation.java   | 52 
 3 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af85ce6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 66d8810..be7b3a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -236,6 +237,16 @@ public class ClusterNodeTracker {
 }
   }
 
+  @VisibleForTesting
+  public void setForceConfiguredMaxAllocation(boolean flag) {
+writeLock.lock();
+try {
+  forceConfiguredMaxAllocation = flag;
+} finally {
+  writeLock.unlock();
+}
+  }
+
   private void updateMaxResources(SchedulerNode node, boolean add) {
 Resource totalResource = node.getTotalResource();
 ResourceInformation[] totalResources;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af85ce6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 6111752..a1d854b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2261,7 +2261,17 @@ public class CapacityScheduler extends
   LOG.error("queue " + queueName + " is not an leaf queue");
   return getMaximumResourceCapability();
 }
-return ((LeafQueue)queue).getMaximumAllocation();
+
+// queue.getMaxAllocation returns *configured* maximum allocation.
+// getMaximumResourceCapability() returns maximum allocation considers
+// per-node maximum resources. So return (component-wise) min of the two.
+
+Resource queueMaxAllocation = ((LeafQueue)queue).getMaximumAllocation();
+Resource clusterMaxAllocationConsiderNodeMax =
+getMaximumResourceCapability();
+
+return Resources.componentwiseMin(queueMaxAllocation,
+clusterMaxAllocationConsiderNodeMax);
   }
 
   private String handleMoveToPlanQueue(String targetQueueName) {


hadoop git commit: YARN-8858. CapacityScheduler should respect maximum node resource when per-queue maximum-allocation is being used. Contributed by Wangda Tan.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5813c1de5 -> 3968ce107


YARN-8858. CapacityScheduler should respect maximum node resource when 
per-queue maximum-allocation is being used. Contributed by Wangda Tan.

(cherry picked from commit edce866489d83744f3f47a3b884b0c6136885e4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3968ce10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3968ce10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3968ce10

Branch: refs/heads/branch-3.1
Commit: 3968ce1073856b64484576b5529f068eb2c10ca5
Parents: 5813c1d
Author: Weiwei Yang 
Authored: Wed Oct 10 09:32:17 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 09:48:56 2018 +0800

--
 .../scheduler/ClusterNodeTracker.java   | 11 +
 .../scheduler/capacity/CapacityScheduler.java   | 12 -
 .../capacity/TestContainerAllocation.java   | 52 
 3 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3968ce10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 66d8810..be7b3a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -236,6 +237,16 @@ public class ClusterNodeTracker {
 }
   }
 
+  @VisibleForTesting
+  public void setForceConfiguredMaxAllocation(boolean flag) {
+writeLock.lock();
+try {
+  forceConfiguredMaxAllocation = flag;
+} finally {
+  writeLock.unlock();
+}
+  }
+
   private void updateMaxResources(SchedulerNode node, boolean add) {
 Resource totalResource = node.getTotalResource();
 ResourceInformation[] totalResources;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3968ce10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 6fcbc9f..955f12c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2445,7 +2445,17 @@ public class CapacityScheduler extends
   LOG.error("queue " + queueName + " is not an leaf queue");
   return getMaximumResourceCapability();
 }
-return ((LeafQueue)queue).getMaximumAllocation();
+
+// queue.getMaxAllocation returns *configured* maximum allocation.
+// getMaximumResourceCapability() returns maximum allocation considers
+// per-node maximum resources. So return (component-wise) min of the two.
+
+Resource queueMaxAllocation = ((LeafQueue)queue).getMaximumAllocation();
+Resource clusterMaxAllocationConsiderNodeMax =
+getMaximumResourceCapability();
+
+return Resources.componentwiseMin(queueMaxAllocation,
+clusterMaxAllocationConsiderNodeMax);
   }
 
   private String handleMoveToPlanQueue(String targetQueueName) {


hadoop git commit: YARN-8858. CapacityScheduler should respect maximum node resource when per-queue maximum-allocation is being used. Contributed by Wangda Tan.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 bc400c8a3 -> eb0147a4c


YARN-8858. CapacityScheduler should respect maximum node resource when 
per-queue maximum-allocation is being used. Contributed by Wangda Tan.

(cherry picked from commit edce866489d83744f3f47a3b884b0c6136885e4a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb0147a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb0147a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb0147a4

Branch: refs/heads/branch-3.2
Commit: eb0147a4c7b719e4bd605b85fa39a9281f25d870
Parents: bc400c8
Author: Weiwei Yang 
Authored: Wed Oct 10 09:32:17 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 09:42:45 2018 +0800

--
 .../scheduler/ClusterNodeTracker.java   | 11 +
 .../scheduler/capacity/CapacityScheduler.java   | 12 -
 .../capacity/TestContainerAllocation.java   | 52 
 3 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb0147a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 8c7e447..0f72c76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -258,6 +259,16 @@ public class ClusterNodeTracker {
 }
   }
 
+  @VisibleForTesting
+  public void setForceConfiguredMaxAllocation(boolean flag) {
+writeLock.lock();
+try {
+  forceConfiguredMaxAllocation = flag;
+} finally {
+  writeLock.unlock();
+}
+  }
+
   private void updateMaxResources(SchedulerNode node, boolean add) {
 Resource totalResource = node.getTotalResource();
 ResourceInformation[] totalResources;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb0147a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 75d6144..fddd361 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2581,7 +2581,17 @@ public class CapacityScheduler extends
   LOG.error("queue " + queueName + " is not an leaf queue");
   return getMaximumResourceCapability();
 }
-return ((LeafQueue)queue).getMaximumAllocation();
+
+// queue.getMaxAllocation returns *configured* maximum allocation.
+// getMaximumResourceCapability() returns maximum allocation considers
+// per-node maximum resources. So return (component-wise) min of the two.
+
+Resource queueMaxAllocation = ((LeafQueue)queue).getMaximumAllocation();
+Resource clusterMaxAllocationConsiderNodeMax =
+getMaximumResourceCapability();
+
+return Resources.componentwiseMin(queueMaxAllocation,
+clusterMaxAllocationConsiderNodeMax);
   }
 
   private String handleMoveToPlanQueue(String targetQueueName) {


hadoop git commit: YARN-8858. CapacityScheduler should respect maximum node resource when per-queue maximum-allocation is being used. Contributed by Wangda Tan.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk f068296f8 -> edce86648


YARN-8858. CapacityScheduler should respect maximum node resource when 
per-queue maximum-allocation is being used. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edce8664
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edce8664
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edce8664

Branch: refs/heads/trunk
Commit: edce866489d83744f3f47a3b884b0c6136885e4a
Parents: f068296
Author: Weiwei Yang 
Authored: Wed Oct 10 09:32:17 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Oct 10 09:32:27 2018 +0800

--
 .../scheduler/ClusterNodeTracker.java   | 11 +
 .../scheduler/capacity/CapacityScheduler.java   | 12 -
 .../capacity/TestContainerAllocation.java   | 52 
 3 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edce8664/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 8c7e447..0f72c76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -258,6 +259,16 @@ public class ClusterNodeTracker {
 }
   }
 
+  @VisibleForTesting
+  public void setForceConfiguredMaxAllocation(boolean flag) {
+writeLock.lock();
+try {
+  forceConfiguredMaxAllocation = flag;
+} finally {
+  writeLock.unlock();
+}
+  }
+
   private void updateMaxResources(SchedulerNode node, boolean add) {
 Resource totalResource = node.getTotalResource();
 ResourceInformation[] totalResources;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edce8664/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 75d6144..fddd361 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2581,7 +2581,17 @@ public class CapacityScheduler extends
   LOG.error("queue " + queueName + " is not an leaf queue");
   return getMaximumResourceCapability();
 }
-return ((LeafQueue)queue).getMaximumAllocation();
+
+// queue.getMaxAllocation returns *configured* maximum allocation.
+// getMaximumResourceCapability() returns maximum allocation considers
+// per-node maximum resources. So return (component-wise) min of the two.
+
+Resource queueMaxAllocation = ((LeafQueue)queue).getMaximumAllocation();
+Resource clusterMaxAllocationConsiderNodeMax =
+getMaximumResourceCapability();
+
+return Resources.componentwiseMin(queueMaxAllocation,
+clusterMaxAllocationConsiderNodeMax);
   }
 
   private String handleMoveToPlanQueue(String targetQueueName) {


hadoop git commit: HDDS-443. Create reusable ProgressBar utility for freon tests. Contributed by Zsolt Horvath.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6fa3feb57 -> f068296f8


HDDS-443. Create reusable ProgressBar utility for freon tests.
Contributed by Zsolt Horvath.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f068296f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f068296f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f068296f

Branch: refs/heads/trunk
Commit: f068296f8a88fc2a4c7b1680bc190c5fa7fc2469
Parents: 6fa3feb
Author: Anu Engineer 
Authored: Tue Oct 9 18:18:19 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 18:19:00 2018 -0700

--
 hadoop-ozone/tools/pom.xml  |   6 +
 .../apache/hadoop/ozone/freon/ProgressBar.java  | 210 +++
 .../hadoop/ozone/freon/TestProgressBar.java | 112 ++
 3 files changed, 328 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f068296f/hadoop-ozone/tools/pom.xml
--
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index ac819b5..2d273d1 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -77,6 +77,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   test
   test-jar
 
+
+  org.mockito
+  mockito-core
+  2.15.0
+  test
+
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f068296f/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
new file mode 100644
index 000..a8d7e73
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.PrintStream;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+/**
+ * Run an arbitrary code and print progress on the provided stream. The
+ * progressbar stops when: - the provided currentvalue is less the the maxvalue
+ * - exception thrown
+ */
+public class ProgressBar {
+
+  private static final long REFRESH_INTERVAL = 1000L;
+
+  private PrintStream stream;
+  private AtomicLong currentValue;
+  private long maxValue;
+  private Thread progressBar;
+  private volatile boolean exception = false;
+  private long startTime;
+
+  /**
+   * @param stream Used to display the progress
+   * @param maxValue Maximum value of the progress
+   */
+  ProgressBar(PrintStream stream, long maxValue) {
+this.stream = stream;
+this.maxValue = maxValue;
+this.currentValue = new AtomicLong(0);
+this.progressBar = new Thread(new ProgressBarThread());
+  }
+
+  /**
+   * Start a task with a progessbar without any in/out parameters Runnable used
+   * just a task wrapper.
+   *
+   * @param task Runnable
+   */
+  public void start(Runnable task) {
+
+startTime = System.nanoTime();
+
+try {
+
+  progressBar.start();
+  task.run();
+
+} catch (Exception e) {
+  exception = true;
+} finally {
+
+  try {
+progressBar.join();
+  } catch (InterruptedException e) {
+e.printStackTrace();
+  }
+}
+  }
+
+  /**
+   * Start a task with only out parameters.
+   *
+   * @param task Supplier that represents the task
+   * @param  Generic return type
+   * @return Whatever the supllier produces
+   */
+  public  T start(Supplier task) {
+
+startTime = System.nanoTime();
+T result = null;
+
+try {
+
+  progressBar.start();
+  result = task.get();
+
+} catch (Exception e) {
+  exception = true;
+} finally {
+
+  try {
+progressBar.join();
+  } catch (InterruptedException e) {
+ 

hadoop git commit: HADOOP-15832. Upgrade BouncyCastle to 1.60. Contributed by Robert Kanter.

2018-10-09 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 794c0451c -> 6fa3feb57


HADOOP-15832. Upgrade BouncyCastle to 1.60. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fa3feb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fa3feb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fa3feb5

Branch: refs/heads/trunk
Commit: 6fa3feb577d05d73a2eb1bc8e39800326f678c31
Parents: 794c045
Author: Akira Ajisaka 
Authored: Wed Oct 10 10:16:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed Oct 10 10:16:57 2018 +0900

--
 .../hadoop-client-check-invariants/pom.xml  |  2 ++
 .../hadoop-client-check-test-invariants/pom.xml |  2 ++
 .../hadoop-client-minicluster/pom.xml   |  2 ++
 .../hadoop-client-runtime/pom.xml   |  2 ++
 hadoop-common-project/hadoop-common/pom.xml |  2 +-
 hadoop-common-project/hadoop-kms/pom.xml|  2 +-
 hadoop-hdds/server-scm/pom.xml  |  2 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |  2 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  2 +-
 .../hadoop-mapreduce-client-app/pom.xml | 20 
 .../hadoop-mapreduce-client-jobclient/pom.xml   |  7 ++-
 hadoop-ozone/ozone-manager/pom.xml  |  2 +-
 hadoop-project/pom.xml  | 12 +---
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  2 +-
 .../pom.xml |  2 +-
 .../hadoop-yarn-server-tests/pom.xml|  2 +-
 .../hadoop-yarn-server-web-proxy/pom.xml|  8 
 18 files changed, 61 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fa3feb5/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml 
b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index 4319a8b..f062986 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -90,6 +90,8 @@
 log4j:log4j
 
 com.google.code.findbugs:jsr305
+
+org.bouncycastle:*
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fa3feb5/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml 
b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 70a987f..c02ce56 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@ -98,6 +98,8 @@
  org.hamcrest:hamcrest-core
 
 com.google.code.findbugs:jsr305
+
+org.bouncycastle:*
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fa3feb5/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index dac5e69..70af1ed 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -671,6 +671,8 @@
   javax.annotation:javax.annotation-api
   org.eclipse.jetty:jetty-jndi
   
+  
+  org.bouncycastle:*
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fa3feb5/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 8afc89e..1e98a61 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -162,6 +162,8 @@
   org.eclipse.jetty:jetty-servlet
   org.eclipse.jetty:jetty-security
   org.ow2.asm:*
+  
+  org.bouncycastle:*
 
   
   


hadoop git commit: HDDS-604. Correct Ozone getOzoneConf description. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4de2dc269 -> 794c0451c


HDDS-604. Correct Ozone getOzoneConf description.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/794c0451
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/794c0451
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/794c0451

Branch: refs/heads/trunk
Commit: 794c0451cffbe147234a2417943709c121d06620
Parents: 4de2dc2
Author: Anu Engineer 
Authored: Tue Oct 9 17:52:06 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 17:52:06 2018 -0700

--
 hadoop-ozone/common/src/main/bin/ozone  |  4 ++--
 hadoop-ozone/common/src/main/bin/start-ozone.sh | 10 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  | 10 +-
 hadoop-ozone/docs/content/CommandShell.md   |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/794c0451/hadoop-ozone/common/src/main/bin/ozone
--
diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
index 4b50771..2ba9ea7 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -39,7 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "fs" client "run a filesystem command on Ozone file 
system. Equivalent to 'hadoop fs'"
   hadoop_add_subcommand "genconf" client "generate minimally required ozone 
configs and output to ozone-site.xml in specified path"
   hadoop_add_subcommand "genesis" client "runs a collection of ozone 
benchmarks to help with tuning."
-  hadoop_add_subcommand "getozoneconf" client "get ozone config values from 
configuration"
+  hadoop_add_subcommand "getconf" client "get ozone config values from 
configuration"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode 
or DataNode."
   hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata 
into relational data"
   hadoop_add_subcommand "om" daemon "Ozone Manager"
@@ -94,7 +94,7 @@ function ozonecmd_case
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;
-getozoneconf)
+getconf)
   HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/794c0451/hadoop-ozone/common/src/main/bin/start-ozone.sh
--
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh 
b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index cfb54e0..4c022fb 100755
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -67,8 +67,8 @@ fi
 #Add other possible options
 nameStartOpt="$nameStartOpt $*"
 
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
 
 if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} 
== "true" ]]; then
   echo "Ozone is not supported in a security enabled cluster."
@@ -77,7 +77,7 @@ fi
 
 #-
 # Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
+OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey ozone.enabled 
| tr '[:upper:]' '[:lower:]' 2>&-)
 if [[ "${OZONE_ENABLED}" != "true" ]]; then
   echo "Operation is not supported because ozone is not enabled."
   exit -1
@@ -96,7 +96,7 @@ hadoop_uservar_su hdfs datanode 
"${HADOOP_HDFS_HOME}/bin/ozone" \
 
 #-
 # Ozone ozonemanager nodes
-OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 
2>/dev/null)
+OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -ozonemanagers 2>/dev/null)
 echo "Starting Ozone Manager nodes [${OM_NODES}]"
 if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
   OM_NODES=$(hostname)
@@ -113,7 +113,7 @@ HADOOP_JUMBO_RETCOUNTER=$?
 
 #-
 # Ozone storagecontainermanager nodes

hadoop git commit: HDDS-568. ozone sh volume info, update, delete operations fail when volume name is not prefixed by /. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 605622c87 -> 4de2dc269


HDDS-568. ozone sh volume info, update, delete operations fail when volume name 
is not prefixed by /.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4de2dc26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4de2dc26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4de2dc26

Branch: refs/heads/trunk
Commit: 4de2dc2699fc371b2de83ba55ecbcecef1f0423b
Parents: 605622c
Author: Anu Engineer 
Authored: Tue Oct 9 17:16:52 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 17:32:04 2018 -0700

--
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 48 
 .../hadoop/ozone/web/ozShell/Handler.java   | 28 
 .../web/ozShell/volume/DeleteVolumeHandler.java | 12 +
 .../web/ozShell/volume/InfoVolumeHandler.java   | 22 +
 .../web/ozShell/volume/UpdateVolumeHandler.java | 12 +
 5 files changed, 79 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de2dc26/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 6e73b8c..d5f2554 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -283,6 +283,33 @@ public class TestOzoneShell {
   GenericTestUtils.assertExceptionContains(
   "Info Volume failed, error:VOLUME_NOT_FOUND", e);
 }
+
+
+volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+volumeArgs = VolumeArgs.newBuilder()
+.setOwner("bilbo")
+.setQuota("100TB")
+.build();
+client.createVolume(volumeName, volumeArgs);
+volume = client.getVolumeDetails(volumeName);
+assertNotNull(volume);
+
+//volumeName prefixed with /
+String volumeNameWithSlashPrefix = "/" + volumeName;
+args = new String[] {"volume", "delete",
+url + "/" + volumeNameWithSlashPrefix};
+execute(shell, args);
+output = out.toString();
+assertTrue(output.contains("Volume " + volumeName + " is deleted"));
+
+// verify if volume has been deleted
+try {
+  client.getVolumeDetails(volumeName);
+  fail("Get volume call should have thrown.");
+} catch (IOException e) {
+  GenericTestUtils.assertExceptionContains(
+  "Info Volume failed, error:VOLUME_NOT_FOUND", e);
+}
   }
 
   @Test
@@ -295,6 +322,7 @@ public class TestOzoneShell {
 .build();
 client.createVolume(volumeName, volumeArgs);
 
+//volumeName supplied as-is
 String[] args = new String[] {"volume", "info", url + "/" + volumeName};
 execute(shell, args);
 
@@ -303,6 +331,17 @@ public class TestOzoneShell {
 assertTrue(output.contains("createdOn")
 && output.contains(OzoneConsts.OZONE_TIME_ZONE));
 
+//volumeName prefixed with /
+String volumeNameWithSlashPrefix = "/" + volumeName;
+args = new String[] {"volume", "info",
+url + "/" + volumeNameWithSlashPrefix};
+execute(shell, args);
+
+output = out.toString();
+assertTrue(output.contains(volumeName));
+assertTrue(output.contains("createdOn")
+&& output.contains(OzoneConsts.OZONE_TIME_ZONE));
+
 // test infoVolume with invalid volume name
 args = new String[] {"volume", "info",
 url + "/" + volumeName + "/invalid-name"};
@@ -365,6 +404,15 @@ public class TestOzoneShell {
 vol = client.getVolumeDetails(volumeName);
 assertEquals(newUser, vol.getOwner());
 
+//volume with / prefix
+String volumeWithPrefix = "/" + volumeName;
+String newUser2 = "new-user2";
+args = new String[] {"volume", "update", url + "/" + volumeWithPrefix,
+"--user", newUser2};
+execute(shell, args);
+vol = client.getVolumeDetails(volumeName);
+assertEquals(newUser2, vol.getOwner());
+
 // test error conditions
 args = new String[] {"volume", "update", url + "/invalid-volume",
 "--user", newUser};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de2dc26/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
 

hadoop git commit: HDFS-11396. TestNameNodeMetadataConsistency#testGenerationStampInFuture timed out. Contributed by Ayush Saxena.

2018-10-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 614d6cf99 -> bc400c8a3


HDFS-11396. TestNameNodeMetadataConsistency#testGenerationStampInFuture timed 
out. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc400c8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc400c8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc400c8a

Branch: refs/heads/branch-3.2
Commit: bc400c8a339ffc16828d651e1fbb33a2075a25b3
Parents: 614d6cf
Author: Inigo Goiri 
Authored: Tue Oct 9 17:03:21 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 9 17:04:17 2018 -0700

--
 .../hdfs/server/namenode/TestNameNodeMetadataConsistency.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc400c8a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index ff49ddf..2677781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import com.google.common.base.Supplier;
@@ -88,6 +89,7 @@ public class TestNameNodeMetadataConsistency {
 final long genStamp = block.getGenerationStamp();
 final int datanodeIndex = 0;
 cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
+DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
 // stop the data node so that it won't remove block
 final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11396. TestNameNodeMetadataConsistency#testGenerationStampInFuture timed out. Contributed by Ayush Saxena.

2018-10-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6a06bc309 -> 605622c87


HDFS-11396. TestNameNodeMetadataConsistency#testGenerationStampInFuture timed 
out. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/605622c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/605622c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/605622c8

Branch: refs/heads/trunk
Commit: 605622c87bc109f60ee1674be37a526e44723b67
Parents: 6a06bc3
Author: Inigo Goiri 
Authored: Tue Oct 9 17:03:21 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 9 17:03:21 2018 -0700

--
 .../hdfs/server/namenode/TestNameNodeMetadataConsistency.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/605622c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index ff49ddf..2677781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import com.google.common.base.Supplier;
@@ -88,6 +89,7 @@ public class TestNameNodeMetadataConsistency {
 final long genStamp = block.getGenerationStamp();
 final int datanodeIndex = 0;
 cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
+DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
 // stop the data node so that it won't remove block
 final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-559. fs.default.name is deprecated. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk c1fe657a1 -> 6a06bc309


HDDS-559. fs.default.name is deprecated.
Contributed by  Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a06bc30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a06bc30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a06bc30

Branch: refs/heads/trunk
Commit: 6a06bc309d72c766694eb6296d5f3fb5c3c597c5
Parents: c1fe657
Author: Anu Engineer 
Authored: Tue Oct 9 16:57:39 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 16:57:39 2018 -0700

--
 hadoop-ozone/docs/content/OzoneFS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a06bc30/hadoop-ozone/docs/content/OzoneFS.md
--
diff --git a/hadoop-ozone/docs/content/OzoneFS.md 
b/hadoop-ozone/docs/content/OzoneFS.md
index d0621be..6853992 100644
--- a/hadoop-ozone/docs/content/OzoneFS.md
+++ b/hadoop-ozone/docs/content/OzoneFS.md
@@ -46,7 +46,7 @@ Please add the following entry to the core-site.xml.
   org.apache.hadoop.fs.ozone.OzoneFileSystem
 
 
-  fs.default.name
+  fs.defaultFS
   o3://localhost:9864/volume/bucket
 
 {{< /highlight >}}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-478. Log files related to each daemon doesn't have proper startup and shutdown logs. Contributed by Dinesh Chitlangia.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6a3973931 -> c1fe657a1


HDDS-478. Log files related to each daemon doesn't have proper startup and 
shutdown logs.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1fe657a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1fe657a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1fe657a

Branch: refs/heads/trunk
Commit: c1fe657a106aaae3bdf81fa4add70962aaee165b
Parents: 6a39739
Author: Anu Engineer 
Authored: Tue Oct 9 16:44:32 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 16:44:32 2018 -0700

--
 .../common/src/main/conf/om-audit-log4j2.properties   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1fe657a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
--
diff --git a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties 
b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
index 7d097a0..7be51ac 100644
--- a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
+++ b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
@@ -52,11 +52,15 @@ filter.write.onMismatch=NEUTRAL
 # TRACE (least specific, a lot of data)
 # ALL (least specific, all data)
 
-appenders=console, rolling
-appender.console.type=Console
-appender.console.name=STDOUT
-appender.console.layout.type=PatternLayout
-appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | 
%throwable{3} %n
+# Uncomment following section to enable logging to console appender also
+#appenders=console, rolling
+#appender.console.type=Console
+#appender.console.name=STDOUT
+#appender.console.layout.type=PatternLayout
+#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | 
%throwable{3} %n
+
+# Comment this line when using both console and rolling appenders
+appenders=rolling
 
 #Rolling File Appender with size & time thresholds.
 #Rolling is triggered when either threshold is breached.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8813. Improve debug messages for NM preemption of OPPORTUNISTIC containers (haibochen via rkanter)

2018-10-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 ad642186a -> bb5991423


YARN-8813. Improve debug messages for NM preemption of OPPORTUNISTIC containers 
(haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb599142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb599142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb599142

Branch: refs/heads/YARN-1011
Commit: bb59914237b76cb7409b63da27b21a07a4481942
Parents: ad64218
Author: Robert Kanter 
Authored: Tue Oct 9 16:15:35 2018 -0700
Committer: Robert Kanter 
Committed: Tue Oct 9 16:15:35 2018 -0700

--
 .../linux/resources/CGroupElasticMemoryController.java | 13 -
 .../linux/resources/DefaultOOMHandler.java |  3 +++
 .../monitor/ContainersMonitorImpl.java |  4 
 .../SnapshotBasedOverAllocationPreemptionPolicy.java   | 12 
 4 files changed, 27 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb599142/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
index 752c3a6..b47edbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
@@ -94,6 +94,7 @@ public class CGroupElasticMemoryController extends Thread {
 boolean controlVirtual = controlVirtualMemory && !controlPhysicalMemory;
 Runnable oomHandlerTemp =
 getDefaultOOMHandler(conf, context, oomHandlerOverride, 
controlVirtual);
+LOG.info("Using OOMHandler: " + oomHandlerTemp.getClass().getName());
 if (controlPhysicalMemory && controlVirtualMemory) {
   LOG.warn(
   NM_ELASTIC_MEMORY_CONTROL_ENABLED + " is on. " +
@@ -138,11 +139,10 @@ public class CGroupElasticMemoryController extends Thread 
{
   Configuration conf, Context context, Runnable oomHandlerLocal,
   boolean controlVirtual)
   throws YarnException {
-Class oomHandlerClass =
-conf.getClass(
-YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
-DefaultOOMHandler.class);
 if (oomHandlerLocal == null) {
+  Class oomHandlerClass = conf.getClass(
+  YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
+  DefaultOOMHandler.class);
   try {
 Constructor constr = oomHandlerClass.getConstructor(
 Context.class, boolean.class);
@@ -284,12 +284,15 @@ public class CGroupElasticMemoryController extends Thread 
{
   // This loop can be exited by terminating the process
   // with stopListening()
   while ((read = events.read(event)) == event.length) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("OOM event notification received from oom-listener");
+}
 // An OOM event has occurred
 resolveOOM(executor);
   }
 
   if (read != -1) {
-LOG.warn(String.format("Characters returned from event hander: %d",
+LOG.warn(String.format("Characters returned from event handler: %d",
 read));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb599142/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
index 86137b5..595aa70 100644
--- 

hadoop git commit: HDDS-583. SCM returns zero as the return code, even when invalid options are passed. Contributed by Namit Maheshwari.

2018-10-09 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 308c614d4 -> 056f48009


HDDS-583. SCM returns zero as the return code, even when invalid options are 
passed. Contributed by Namit Maheshwari.

(cherry picked from commit 6a39739316795a4828833e99d78aadc684270f98)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/056f4800
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/056f4800
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/056f4800

Branch: refs/heads/ozone-0.3
Commit: 056f480093938808aa016d828000a1fc99cff141
Parents: 308c614
Author: Bharat Viswanadham 
Authored: Tue Oct 9 15:21:02 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Oct 9 15:21:50 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/server/StorageContainerManager.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/056f4800/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index efd5fc5..ce2725f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -513,7 +513,7 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 
   private static StartupOption parseArguments(String[] args) {
 int argsLen = (args == null) ? 0 : args.length;
-StartupOption startOpt = StartupOption.HELP;
+StartupOption startOpt = null;
 if (argsLen == 0) {
   startOpt = StartupOption.REGULAR;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-583. SCM returns zero as the return code, even when invalid options are passed. Contributed by Namit Maheshwari.

2018-10-09 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5b7ba48ce -> 6a3973931


HDDS-583. SCM returns zero as the return code, even when invalid options are 
passed. Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a397393
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a397393
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a397393

Branch: refs/heads/trunk
Commit: 6a39739316795a4828833e99d78aadc684270f98
Parents: 5b7ba48
Author: Bharat Viswanadham 
Authored: Tue Oct 9 15:21:02 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Oct 9 15:21:19 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/server/StorageContainerManager.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a397393/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index efd5fc5..ce2725f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -513,7 +513,7 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 
   private static StartupOption parseArguments(String[] args) {
 int argsLen = (args == null) ? 0 : args.length;
-StartupOption startOpt = StartupOption.HELP;
+StartupOption startOpt = null;
 if (argsLen == 0) {
   startOpt = StartupOption.REGULAR;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-564. Update docker-hadoop-runner branch to reflect changes done in HDDS-490. Contributed by Namit Maheshwari.

2018-10-09 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/docker-hadoop-runner 08d7a0f28 -> ff717b6c8


HDDS-564. Update docker-hadoop-runner branch to reflect changes done in 
HDDS-490. Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff717b6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff717b6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff717b6c

Branch: refs/heads/docker-hadoop-runner
Commit: ff717b6c8d22f0e1fb27c745cd283925e2dd601e
Parents: 08d7a0f
Author: Arpit Agarwal 
Authored: Tue Oct 9 14:17:17 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Oct 9 14:17:17 2018 -0700

--
 scripts/starter.sh | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff717b6c/scripts/starter.sh
--
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 047791b..cc6d25c5 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -80,7 +80,8 @@ fi
 
 if [ -n "$ENSURE_SCM_INITIALIZED" ]; then
if [ ! -f "$ENSURE_SCM_INITIALIZED" ]; then
-  /opt/hadoop/bin/ozone scm -init
+  # Improve om and scm start up options
+  /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init
fi
 fi
 
@@ -91,7 +92,8 @@ if [ -n "$ENSURE_OM_INITIALIZED" ]; then
   # Could be removed after HDFS-13203
   echo "Waiting 15 seconds for SCM startup"
   sleep 15
-  /opt/hadoop/bin/ozone om -createObjectStore
+  # Improve om and scm start up options
+  /opt/hadoop/bin/ozone om --init || /opt/hadoop/bin/ozone om 
-createObjectStore
fi
 fi
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8807. FairScheduler crashes RM with oversubscription turned on if an application is killed. (haibochen via rkanter)

2018-10-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 efd852449 -> ad642186a


YARN-8807. FairScheduler crashes RM with oversubscription turned on if an 
application is killed. (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad642186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad642186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad642186

Branch: refs/heads/YARN-1011
Commit: ad642186a908b3b75afb4bc6270177564a8d75d6
Parents: efd8524
Author: Robert Kanter 
Authored: Tue Oct 9 14:15:54 2018 -0700
Committer: Robert Kanter 
Committed: Tue Oct 9 14:15:54 2018 -0700

--
 .../scheduler/fair/FairScheduler.java   |   9 +-
 .../scheduler/fair/TestFairScheduler.java   | 105 +++
 2 files changed, 110 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad642186/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 744776a..44aad67 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1162,10 +1162,11 @@ public class FairScheduler extends
 for (RMContainer rmContainer : promoted)  {
   FSAppAttempt appAttempt = getSchedulerApp(
   rmContainer.getApplicationAttemptId());
-  appAttempt.opportunisticContainerPromoted(rmContainer);
-
-  promotion.put(rmContainer.getContainer(),
-  ContainerUpdateType.PROMOTE_EXECUTION_TYPE);
+  if (appAttempt != null) {
+appAttempt.opportunisticContainerPromoted(rmContainer);
+promotion.put(rmContainer.getContainer(),
+ContainerUpdateType.PROMOTE_EXECUTION_TYPE);
+  }
 }
 
 if (!promotion.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad642186/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 5847ca9..ec4f082 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -119,6 +119,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtil
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
@@ -3948,6 +3949,110 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   }
 
   @Test
+  public void testKillingApplicationWithOpportunisticContainersAssigned()
+  throws IOException {
+conf.setBoolean(YarnConfiguration.RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED,
+true);
+// disable resource 

hadoop git commit: HDDS-577. Support S3 buckets as first class objects in Ozone Manager - 2. Contributed by Bharat Viswanadham.

2018-10-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk bf04f1945 -> 5b7ba48ce


HDDS-577. Support S3 buckets as first class objects in Ozone Manager - 2.
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b7ba48c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b7ba48c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b7ba48c

Branch: refs/heads/trunk
Commit: 5b7ba48cedb0d70ca154771fec48e5c4129cf29a
Parents: bf04f19
Author: Anu Engineer 
Authored: Tue Oct 9 13:37:42 2018 -0700
Committer: Anu Engineer 
Committed: Tue Oct 9 13:37:42 2018 -0700

--
 .../apache/hadoop/ozone/client/ObjectStore.java |  48 +
 .../ozone/client/protocol/ClientProtocol.java   |  35 
 .../hadoop/ozone/client/rest/RestClient.java|  25 +++
 .../hadoop/ozone/client/rpc/RpcClient.java  |  32 
 .../ozone/om/protocol/OzoneManagerProtocol.java |  25 +++
 ...neManagerProtocolClientSideTranslatorPB.java |  51 +
 .../src/main/proto/OzoneManagerProtocol.proto   |  32 
 .../ozone/client/rpc/TestOzoneRpcClient.java|  34 
 .../apache/hadoop/ozone/om/OzoneManager.java|  23 +++
 .../hadoop/ozone/om/S3BucketManagerImpl.java|  11 ++
 ...neManagerProtocolServerSideTranslatorPB.java | 187 +++
 11 files changed, 429 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7ba48c/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 17d1938..4196556 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -83,6 +83,54 @@ public class ObjectStore {
   }
 
   /**
+   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
+   * to access via both S3 and Ozone.
+   * @param userName - S3 user name.
+   * @param s3BucketName - S3 bucket Name.
+   * @throws IOException - On failure, throws an exception like Bucket exists.
+   */
+  public void createS3Bucket(String userName, String s3BucketName) throws
+  IOException {
+proxy.createS3Bucket(userName, s3BucketName);
+  }
+
+  /**
+   * Returns the Ozone Namespace for the S3Bucket. It will return the
+   * OzoneVolume/OzoneBucketName.
+   * @param s3BucketName  - S3 Bucket Name.
+   * @return String - The Ozone canonical name for this s3 bucket. This
+   * string is useful for mounting an OzoneFS.
+   * @throws IOException - Error is throw if the s3bucket does not exist.
+   */
+  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
+return proxy.getOzoneBucketMapping(s3BucketName);
+  }
+
+  /**
+   * Returns the corresponding Ozone volume given an S3 Bucket.
+   * @param s3BucketName - S3Bucket Name.
+   * @return String - Ozone Volume name.
+   * @throws IOException - Throws if the s3Bucket does not exist.
+   */
+  public String getOzoneVolumeName(String s3BucketName) throws IOException {
+String mapping = getOzoneBucketMapping(s3BucketName);
+return mapping.split("/")[0];
+
+  }
+
+  /**
+   * Returns the corresponding Ozone bucket name for the given S3 bucket.
+   * @param s3BucketName - S3Bucket Name.
+   * @return String - Ozone bucket Name.
+   * @throws IOException - Throws if the s3bucket does not exist.
+   */
+  public String getOzoneBucketName(String s3BucketName) throws IOException {
+String mapping = getOzoneBucketMapping(s3BucketName);
+return mapping.split("/")[1];
+  }
+
+
+  /**
* Returns the volume information.
* @param volumeName Name of the volume.
* @return OzoneVolume

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7ba48c/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 008b69d..b750a5a 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -322,6 +322,41 @@ public interface ClientProtocol {
   throws IOException;
 
   /**
+   * Creates an S3 bucket inside Ozone manager and creates the mapping needed
+   * to access via both S3 and Ozone.
+   * @param userName - 

hadoop git commit: YARN-8845. Removed unused hadoop.registry.rm reference. Contributed by Íñigo Goiri

2018-10-09 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 89d448102 -> 5813c1de5


YARN-8845.  Removed unused hadoop.registry.rm reference.
Contributed by Íñigo Goiri

(cherry picked from commit bf04f194568f9e81f5481b25a84ad903e3c307cf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5813c1de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5813c1de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5813c1de

Branch: refs/heads/branch-3.1
Commit: 5813c1de56970b66acff82c7a22d084d655e8841
Parents: 89d4481
Author: Eric Yang 
Authored: Tue Oct 9 15:54:45 2018 -0400
Committer: Eric Yang 
Committed: Tue Oct 9 15:56:38 2018 -0400

--
 .../src/main/resources/core-default.xml | 16 --
 .../registry/client/api/RegistryConstants.java  | 11 
 .../markdown/registry/registry-configuration.md | 56 
 .../src/site/markdown/registry/yarn-registry.md | 15 --
 4 files changed, 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5813c1de/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8c71b42..0c3f43d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2670,22 +2670,6 @@
   
 
   
-hadoop.registry.rm.enabled
-false
-
-  Is the registry enabled in the YARN Resource Manager?
-
-  If true, the YARN RM will, as needed.
-  create the user and system paths, and purge
-  service records when containers, application attempts
-  and applications complete.
-
-  If false, the paths must be created by other means,
-  and no automatic cleanup of service records will take place.
-
-  
-
-  
 hadoop.registry.zk.root
 /registry
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5813c1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
index bd97a5a..db4f311 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -54,17 +54,6 @@ public interface RegistryConstants {
* flag to indicate whether or not the registry should
* be enabled in the RM: {@value}.
*/
-  String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled";
-
-  /**
-   * Defaut value for enabling the registry in the RM: {@value}.
-   */
-  boolean DEFAULT_REGISTRY_ENABLED = false;
-
-  /**
-   * flag to indicate whether or not the registry should
-   * be enabled in the RM: {@value}.
-   */
   String KEY_DNS_ENABLED = DNS_PREFIX + "enabled";
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5813c1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
index 46bc92d..1d03f8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
@@ -36,51 +36,6 @@ the values, so enabling them to read from and potentially 
write to the registry.
 ## Core Settings
 
 
-### Enabling the Registry in the Resource Manager
-
-The Resource Manager manages user directory creation and record cleanup
-on YARN container/application attempt/application completion.
-
-```
-  
-
-  Is the registry enabled in the YARN Resource Manager?
-
-  If true, the YARN RM will, as needed.
-  create the user and system paths, and purge
-  service records when containers, application attempts
-  and applications complete.
-
-  If false, the paths must be created by other means,
-  and no automatic cleanup of 

hadoop git commit: YARN-8845. Removed unused hadoop.registry.rm reference. Contributed by Íñigo Goiri

2018-10-09 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 8e8b74872 -> 614d6cf99


YARN-8845.  Removed unused hadoop.registry.rm reference.
Contributed by Íñigo Goiri

(cherry picked from commit bf04f194568f9e81f5481b25a84ad903e3c307cf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/614d6cf9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/614d6cf9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/614d6cf9

Branch: refs/heads/branch-3.2
Commit: 614d6cf99ce369c5cc07bff1999dd336f5eeb097
Parents: 8e8b748
Author: Eric Yang 
Authored: Tue Oct 9 15:54:45 2018 -0400
Committer: Eric Yang 
Committed: Tue Oct 9 15:56:04 2018 -0400

--
 .../src/main/resources/core-default.xml | 16 --
 .../registry/client/api/RegistryConstants.java  | 11 
 .../markdown/registry/registry-configuration.md | 56 
 .../src/site/markdown/registry/yarn-registry.md | 15 --
 4 files changed, 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/614d6cf9/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index f8eba04..3b86f72 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2744,22 +2744,6 @@
   
 
   
-hadoop.registry.rm.enabled
-false
-
-  Is the registry enabled in the YARN Resource Manager?
-
-  If true, the YARN RM will, as needed.
-  create the user and system paths, and purge
-  service records when containers, application attempts
-  and applications complete.
-
-  If false, the paths must be created by other means,
-  and no automatic cleanup of service records will take place.
-
-  
-
-  
 hadoop.registry.zk.root
 /registry
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/614d6cf9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
index bd97a5a..db4f311 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -54,17 +54,6 @@ public interface RegistryConstants {
* flag to indicate whether or not the registry should
* be enabled in the RM: {@value}.
*/
-  String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled";
-
-  /**
-   * Defaut value for enabling the registry in the RM: {@value}.
-   */
-  boolean DEFAULT_REGISTRY_ENABLED = false;
-
-  /**
-   * flag to indicate whether or not the registry should
-   * be enabled in the RM: {@value}.
-   */
   String KEY_DNS_ENABLED = DNS_PREFIX + "enabled";
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/614d6cf9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
index 46bc92d..1d03f8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
@@ -36,51 +36,6 @@ the values, so enabling them to read from and potentially 
write to the registry.
 ## Core Settings
 
 
-### Enabling the Registry in the Resource Manager
-
-The Resource Manager manages user directory creation and record cleanup
-on YARN container/application attempt/application completion.
-
-```
-  
-
-  Is the registry enabled in the YARN Resource Manager?
-
-  If true, the YARN RM will, as needed.
-  create the user and system paths, and purge
-  service records when containers, application attempts
-  and applications complete.
-
-  If false, the paths must be created by other means,
-  and no automatic cleanup of 

hadoop git commit: YARN-8845. Removed unused hadoop.registry.rm reference. Contributed by Íñigo Goiri

2018-10-09 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk d5dd6f31f -> bf04f1945


YARN-8845.  Removed unused hadoop.registry.rm reference.
Contributed by Íñigo Goiri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf04f194
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf04f194
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf04f194

Branch: refs/heads/trunk
Commit: bf04f194568f9e81f5481b25a84ad903e3c307cf
Parents: d5dd6f3
Author: Eric Yang 
Authored: Tue Oct 9 15:54:45 2018 -0400
Committer: Eric Yang 
Committed: Tue Oct 9 15:54:45 2018 -0400

--
 .../src/main/resources/core-default.xml | 16 --
 .../registry/client/api/RegistryConstants.java  | 11 
 .../markdown/registry/registry-configuration.md | 56 
 .../src/site/markdown/registry/yarn-registry.md | 15 --
 4 files changed, 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf04f194/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index f3167f2..3ea2797 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2754,22 +2754,6 @@
   
 
   
-hadoop.registry.rm.enabled
-false
-
-  Is the registry enabled in the YARN Resource Manager?
-
-  If true, the YARN RM will, as needed.
-  create the user and system paths, and purge
-  service records when containers, application attempts
-  and applications complete.
-
-  If false, the paths must be created by other means,
-  and no automatic cleanup of service records will take place.
-
-  
-
-  
 hadoop.registry.zk.root
 /registry
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf04f194/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
index bd97a5a..db4f311 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -54,17 +54,6 @@ public interface RegistryConstants {
* flag to indicate whether or not the registry should
* be enabled in the RM: {@value}.
*/
-  String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled";
-
-  /**
-   * Defaut value for enabling the registry in the RM: {@value}.
-   */
-  boolean DEFAULT_REGISTRY_ENABLED = false;
-
-  /**
-   * flag to indicate whether or not the registry should
-   * be enabled in the RM: {@value}.
-   */
   String KEY_DNS_ENABLED = DNS_PREFIX + "enabled";
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf04f194/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
index 46bc92d..1d03f8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
@@ -36,51 +36,6 @@ the values, so enabling them to read from and potentially 
write to the registry.
 ## Core Settings
 
 
-### Enabling the Registry in the Resource Manager
-
-The Resource Manager manages user directory creation and record cleanup
-on YARN container/application attempt/application completion.
-
-```
-  
-
-  Is the registry enabled in the YARN Resource Manager?
-
-  If true, the YARN RM will, as needed.
-  create the user and system paths, and purge
-  service records when containers, application attempts
-  and applications complete.
-
-  If false, the paths must be created by other means,
-  and no automatic cleanup of service records will take place.
-
-hadoop.registry.rm.enabled
-false
-  

hadoop git commit: HDDS-585. Handle common request identifiers in a transparent way. Contributed by Elek Marton.

2018-10-09 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 d08a53877 -> 308c614d4


HDDS-585. Handle common request identifiers in a transparent way. Contributed 
by Elek Marton.

(cherry picked from commit d5dd6f31fc35b890cfa241d5fce404d6774e98c6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/308c614d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/308c614d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/308c614d

Branch: refs/heads/ozone-0.3
Commit: 308c614d4a45ac37ed31aaab3764026ac0e58395
Parents: d08a538
Author: Bharat Viswanadham 
Authored: Tue Oct 9 12:43:04 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Oct 9 12:44:24 2018 -0700

--
 .../CommonHeadersContainerResponseFilter.java   |  9 
 .../apache/hadoop/ozone/s3/EndpointBase.java| 23 ++
 .../hadoop/ozone/s3/RequestIdentifier.java  | 48 
 .../hadoop/ozone/s3/bucket/DeleteBucket.java| 15 +++---
 .../hadoop/ozone/s3/bucket/HeadBucket.java  | 21 -
 .../ozone/s3/exception/OS3ExceptionMapper.java  |  9 
 .../hadoop/ozone/s3/exception/S3ErrorTable.java |  5 +-
 .../ozone/s3/bucket/TestDeleteBucket.java   |  5 +-
 .../hadoop/ozone/s3/bucket/TestHeadBucket.java  |  5 +-
 .../ozone/s3/exception/TestOS3Exception.java|  3 +-
 10 files changed, 90 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/308c614d/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
index d2f2d65..27f792e 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.s3;
 
+import javax.inject.Inject;
 import javax.ws.rs.container.ContainerRequestContext;
 import javax.ws.rs.container.ContainerResponseContext;
 import javax.ws.rs.container.ContainerResponseFilter;
@@ -30,10 +31,18 @@ import java.io.IOException;
 public class CommonHeadersContainerResponseFilter implements
 ContainerResponseFilter {
 
+  @Inject
+  private RequestIdentifier requestIdentifier;
+
   @Override
   public void filter(ContainerRequestContext containerRequestContext,
   ContainerResponseContext containerResponseContext) throws IOException {
+
 containerResponseContext.getHeaders().add("Server", "Ozone");
+containerResponseContext.getHeaders()
+.add("x-amz-id-2", requestIdentifier.getAmzId());
+containerResponseContext.getHeaders()
+.add("x-amz-request-id", requestIdentifier.getRequestId());
 
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/308c614d/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
index 007cca2..5ec45f7 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
@@ -24,12 +24,11 @@ import java.io.IOException;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneVolume;
-
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable.Resource;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,7 +41,6 @@ public class EndpointBase {
   LoggerFactory.getLogger(EndpointBase.class);
   @Inject
   private OzoneClient client;
-  private String requestId;
 
   protected OzoneBucket getBucket(String volumeName, String bucketName)
   throws IOException {
@@ -57,8 +55,8 @@ public class EndpointBase {
 } catch (IOException ex) {
   LOG.error("Error occurred is {}", ex);
   if (ex.getMessage().contains("NOT_FOUND")) {
-OS3Exception oex = S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET,
-OzoneUtils.getRequestID(), 

hadoop git commit: HDDS-585. Handle common request identifiers in a transparent way. Contributed by Elek Marton.

2018-10-09 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk bd50fa956 -> d5dd6f31f


HDDS-585. Handle common request identifiers in a transparent way. Contributed 
by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5dd6f31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5dd6f31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5dd6f31

Branch: refs/heads/trunk
Commit: d5dd6f31fc35b890cfa241d5fce404d6774e98c6
Parents: bd50fa9
Author: Bharat Viswanadham 
Authored: Tue Oct 9 12:43:04 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Oct 9 12:43:04 2018 -0700

--
 .../CommonHeadersContainerResponseFilter.java   |  9 
 .../apache/hadoop/ozone/s3/EndpointBase.java| 23 ++
 .../hadoop/ozone/s3/RequestIdentifier.java  | 48 
 .../hadoop/ozone/s3/bucket/DeleteBucket.java| 15 +++---
 .../hadoop/ozone/s3/bucket/HeadBucket.java  | 21 -
 .../ozone/s3/exception/OS3ExceptionMapper.java  |  9 
 .../hadoop/ozone/s3/exception/S3ErrorTable.java |  5 +-
 .../ozone/s3/bucket/TestDeleteBucket.java   |  5 +-
 .../hadoop/ozone/s3/bucket/TestHeadBucket.java  |  5 +-
 .../ozone/s3/exception/TestOS3Exception.java|  3 +-
 10 files changed, 90 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5dd6f31/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
index d2f2d65..27f792e 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.s3;
 
+import javax.inject.Inject;
 import javax.ws.rs.container.ContainerRequestContext;
 import javax.ws.rs.container.ContainerResponseContext;
 import javax.ws.rs.container.ContainerResponseFilter;
@@ -30,10 +31,18 @@ import java.io.IOException;
 public class CommonHeadersContainerResponseFilter implements
 ContainerResponseFilter {
 
+  @Inject
+  private RequestIdentifier requestIdentifier;
+
   @Override
   public void filter(ContainerRequestContext containerRequestContext,
   ContainerResponseContext containerResponseContext) throws IOException {
+
 containerResponseContext.getHeaders().add("Server", "Ozone");
+containerResponseContext.getHeaders()
+.add("x-amz-id-2", requestIdentifier.getAmzId());
+containerResponseContext.getHeaders()
+.add("x-amz-request-id", requestIdentifier.getRequestId());
 
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5dd6f31/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
index 007cca2..5ec45f7 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/EndpointBase.java
@@ -24,12 +24,11 @@ import java.io.IOException;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneVolume;
-
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable.Resource;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,7 +41,6 @@ public class EndpointBase {
   LoggerFactory.getLogger(EndpointBase.class);
   @Inject
   private OzoneClient client;
-  private String requestId;
 
   protected OzoneBucket getBucket(String volumeName, String bucketName)
   throws IOException {
@@ -57,8 +55,8 @@ public class EndpointBase {
 } catch (IOException ex) {
   LOG.error("Error occurred is {}", ex);
   if (ex.getMessage().contains("NOT_FOUND")) {
-OS3Exception oex = S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET,
-OzoneUtils.getRequestID(), Resource.BUCKET);
+OS3Exception oex =
+

hadoop git commit: HADOOP-15825. ABFS: Enable some tests for namespace not enabled account using OAuth. Contributed by Da Zhou.

2018-10-09 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk a05bd1288 -> bd50fa956


HADOOP-15825. ABFS: Enable some tests for namespace not enabled account using 
OAuth.
Contributed by Da Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd50fa95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd50fa95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd50fa95

Branch: refs/heads/trunk
Commit: bd50fa956b1ca25bb2136977b98a6aa6895eff8b
Parents: a05bd12
Author: Steve Loughran 
Authored: Tue Oct 9 20:02:12 2018 +0100
Committer: Steve Loughran 
Committed: Tue Oct 9 20:02:12 2018 +0100

--
 .../fs/azurebfs/AbstractAbfsIntegrationTest.java  |  3 ++-
 .../azurebfs/ITestAzureBlobFileSystemBackCompat.java  |  6 ++
 .../azurebfs/ITestAzureBlobFileSystemFileStatus.java  | 12 ++--
 .../fs/azurebfs/ITestAzureBlobFileSystemFinalize.java |  8 +++-
 .../fs/azurebfs/ITestAzureBlobFileSystemFlush.java|  5 -
 .../ITestAzureBlobFileSystemInitAndCreate.java|  3 +--
 .../azurebfs/ITestAzureBlobFileSystemRandomRead.java  | 12 +---
 .../fs/azurebfs/ITestWasbAbfsCompatibility.java   | 14 --
 8 files changed, 27 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd50fa95/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
index 6f794d0..382cd7f 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
@@ -133,7 +133,8 @@ public abstract class AbstractAbfsIntegrationTest extends
 //Create filesystem first to make sure getWasbFileSystem() can return an 
existing filesystem.
 createFileSystem();
 
-if (!isIPAddress && authType == AuthType.SharedKey) {
+// Only live account without namespace support can run ABFS 
compatibility tests
+if (!isIPAddress && !abfs.getIsNamespaceEnabled()) {
   final URI wasbUri = new URI(abfsUrlToWasbUrl(getTestUrl()));
   final AzureNativeFileSystemStore azureNativeFileSystemStore =
   new AzureNativeFileSystemStore();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd50fa95/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
index 22d4990..5ac16b4 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
@@ -26,7 +26,6 @@ import com.microsoft.azure.storage.blob.CloudBlockBlob;
 import org.junit.Assume;
 import org.junit.Test;
 
-import org.apache.hadoop.fs.azurebfs.services.AuthType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 
@@ -38,14 +37,13 @@ public class ITestAzureBlobFileSystemBackCompat extends
 
   public ITestAzureBlobFileSystemBackCompat() throws Exception {
 super();
-Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey);
   }
 
   @Test
   public void testBlobBackCompat() throws Exception {
 final AzureBlobFileSystem fs = this.getFileSystem();
-// test only valid for non-namespace enabled account
-Assume.assumeFalse(fs.getIsNamespaceEnabled());
+Assume.assumeFalse("This test does not support namespace enabled account",
+this.getFileSystem().getIsNamespaceEnabled());
 String storageConnectionString = getBlobConnectionString();
 CloudStorageAccount storageAccount = 
CloudStorageAccount.parse(storageConnectionString);
 CloudBlobClient blobClient = storageAccount.createCloudBlobClient();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd50fa95/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
 

hadoop git commit: HDFS-13979. Review StateStoreFileSystemImpl Class. Contributed by BELUGA BEHR.

2018-10-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4b5b1ac3d -> a05bd1288


HDFS-13979. Review StateStoreFileSystemImpl Class. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a05bd128
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a05bd128
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a05bd128

Branch: refs/heads/trunk
Commit: a05bd1288cabfe4cafb0693739add4a675aec5a2
Parents: 4b5b1ac
Author: Inigo Goiri 
Authored: Tue Oct 9 12:00:21 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 9 12:00:21 2018 -0700

--
 .../store/driver/impl/StateStoreFileSystemImpl.java | 16 +---
 1 file changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a05bd128/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
index 2e1ff8f..e6bf159 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
@@ -24,7 +24,8 @@ import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
-import java.util.LinkedList;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -33,8 +34,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
@@ -50,13 +51,13 @@ public class StateStoreFileSystemImpl extends 
StateStoreFileBaseImpl {
   private static final Logger LOG =
   LoggerFactory.getLogger(StateStoreFileSystemImpl.class);
 
-
   /** Configuration keys. */
   public static final String FEDERATION_STORE_FS_PATH =
   RBFConfigKeys.FEDERATION_STORE_PREFIX + "driver.fs.path";
 
   /** File system to back the State Store. */
   private FileSystem fs;
+
   /** Working path in the filesystem. */
   private String workPath;
 
@@ -141,7 +142,7 @@ public class StateStoreFileSystemImpl extends 
StateStoreFileBaseImpl {
   new InputStreamReader(fdis, StandardCharsets.UTF_8);
   reader = new BufferedReader(isr);
 } catch (IOException ex) {
-  LOG.error("Cannot open read stream for {}", path);
+  LOG.error("Cannot open read stream for {}", path, ex);
 }
 return reader;
   }
@@ -156,25 +157,26 @@ public class StateStoreFileSystemImpl extends 
StateStoreFileBaseImpl {
   new OutputStreamWriter(fdos, StandardCharsets.UTF_8);
   writer = new BufferedWriter(osw);
 } catch (IOException ex) {
-  LOG.error("Cannot open write stream for {}", path);
+  LOG.error("Cannot open write stream for {}", path, ex);
 }
 return writer;
   }
 
   @Override
   protected List getChildren(String pathName) {
-List ret = new LinkedList<>();
 Path path = new Path(workPath, pathName);
 try {
   FileStatus[] files = fs.listStatus(path);
+  List ret = new ArrayList<>(files.length);
   for (FileStatus file : files) {
 Path filePath = file.getPath();
 String fileName = filePath.getName();
 ret.add(fileName);
   }
+  return ret;
 } catch (Exception e) {
   LOG.error("Cannot get children for {}", pathName, e);
+  return Collections.emptyList();
 }
-return ret;
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15550. Avoid static initialization of ObjectMappers

2018-10-09 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 be0ce74c7 -> 89d448102


HADOOP-15550. Avoid static initialization of ObjectMappers

(cherry picked from commit 7a3c6e9c3cd9ffdc71946fd12f5c3d59718c4939)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89d44810
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89d44810
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89d44810

Branch: refs/heads/branch-3.1
Commit: 89d4481020ed50d54d750de204c86d2b3b76a939
Parents: be0ce74
Author: Todd Lipcon 
Authored: Mon Jun 25 15:36:45 2018 -0700
Committer: Jonathan Eagles 
Committed: Tue Oct 9 13:50:00 2018 -0500

--
 .../crypto/key/kms/KMSClientProvider.java   |  7 ++
 .../web/DelegationTokenAuthenticator.java   |  8 ++-
 .../apache/hadoop/util/HttpExceptionUtils.java  | 12 ++
 .../apache/hadoop/util/JsonSerialization.java   | 24 
 .../crypto/key/kms/server/KMSJSONWriter.java|  6 ++---
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  7 ++
 ...onfRefreshTokenBasedAccessTokenProvider.java |  8 +++
 .../CredentialBasedAccessTokenProvider.java |  8 +++
 .../apache/hadoop/mapreduce/JobSubmitter.java   |  8 +++
 .../hadoop/fs/azure/security/JsonUtils.java |  4 ++--
 10 files changed, 45 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d44810/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index edbf897..7b46075 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.security.token.TokenRenewer;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.JsonSerialization;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
@@ -79,7 +80,6 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
@@ -132,9 +132,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private final ValueQueue encKeyVersionQueue;
 
-  private static final ObjectWriter WRITER =
-  new ObjectMapper().writerWithDefaultPrettyPrinter();
-
   private final Text dtService;
 
   // Allow fallback to default kms server port 9600 for certain tests that do
@@ -237,7 +234,7 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static void writeJson(Object obj, OutputStream os)
   throws IOException {
 Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
-WRITER.writeValue(writer, obj);
+JsonSerialization.writer().writeValue(writer, obj);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d44810/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 617773b..0ae2af3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.security.token.delegation.web;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.hadoop.classification.InterfaceAudience;
 import 

hadoop git commit: MAPREDUCE-7130. Rumen crashes trying to handle MRAppMaster recovery events. Contributed by Peter Bacsko

2018-10-09 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 03d66b1f5 -> 4b5b1ac3d


MAPREDUCE-7130. Rumen crashes trying to handle MRAppMaster recovery events. 
Contributed by  Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b5b1ac3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b5b1ac3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b5b1ac3

Branch: refs/heads/trunk
Commit: 4b5b1ac3d10f1a190450ad59b8be5c9568921852
Parents: 03d66b1
Author: Jason Lowe 
Authored: Tue Oct 9 13:27:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Oct 9 13:27:03 2018 -0500

--
 .../hadoop/tools/rumen/Pre21JobHistoryConstants.java | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5b1ac3/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
--
diff --git 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
index 239d666..8adff46 100644
--- 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
+++ 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java
@@ -44,12 +44,17 @@ public class Pre21JobHistoryConstants {
   /**
* This enum contains some of the values commonly used by history log 
events. 
* since values in history can only be strings - Values.name() is used in 
-   * most places in history file. 
+   * most places in history file.
+   *
+   * Note: "SUCCEEDED" is actually not a pre-0.21 value, but it might appear
+   * in jhist logs when the event is an unsuccessful job completion, yet, the
+   * overall job status is "SUCCEEDED".
*/
   public static enum Values {
-SUCCESS, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP, SETUP
+SUCCESS, SUCCEEDED, FAILED, KILLED, MAP, REDUCE, CLEANUP, RUNNING, PREP,
+SETUP
   }
-  
+
   /**
* Regex for Pre21 V1(old) jobhistory filename
*   i.e jt-identifier_job-id_user-name_job-name


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: HDDS-590. Add unit test for HDDS-583. Contributed by Namit Maheshwari.

2018-10-09 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 abfd4ffdc -> d08a53877
  refs/heads/trunk c3d22d3b4 -> 03d66b1f5


HDDS-590. Add unit test for HDDS-583. Contributed by Namit Maheshwari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03d66b1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03d66b1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03d66b1f

Branch: refs/heads/trunk
Commit: 03d66b1f5d8936e8006b87511af7a647ed1c3fcd
Parents: c3d22d3
Author: Arpit Agarwal 
Authored: Tue Oct 9 11:04:43 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Oct 9 11:04:43 2018 -0700

--
 .../hadoop/ozone/TestStorageContainerManager.java  | 13 +
 1 file changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03d66b1f/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index ac3ad5d..c3c5d04 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -437,6 +438,18 @@ public class TestStorageContainerManager {
   }
 
   @Test
+  public void testSCMInitializationReturnCode() throws IOException {
+ExitUtil.disableSystemExit();
+OzoneConfiguration conf = new OzoneConfiguration();
+conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+// Set invalid args
+String[] invalidArgs = {"--zxcvbnm"};
+exception.expect(ExitUtil.ExitException.class);
+exception.expectMessage("ExitException");
+StorageContainerManager.createSCM(invalidArgs, conf);
+  }
+
+  @Test
   public void testScmInfo() throws Exception {
 OzoneConfiguration conf = new OzoneConfiguration();
 final String path =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HDDS-590. Add unit test for HDDS-583. Contributed by Namit Maheshwari.

2018-10-09 Thread arp
HDDS-590. Add unit test for HDDS-583. Contributed by Namit Maheshwari.

(cherry picked from commit 03d66b1f5d8936e8006b87511af7a647ed1c3fcd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d08a5387
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d08a5387
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d08a5387

Branch: refs/heads/ozone-0.3
Commit: d08a5387778a85e7f47d76ad47da775caa55e489
Parents: abfd4ff
Author: Arpit Agarwal 
Authored: Tue Oct 9 11:04:43 2018 -0700
Committer: Arpit Agarwal 
Committed: Tue Oct 9 11:05:23 2018 -0700

--
 .../hadoop/ozone/TestStorageContainerManager.java  | 13 +
 1 file changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d08a5387/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index ac3ad5d..c3c5d04 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -437,6 +438,18 @@ public class TestStorageContainerManager {
   }
 
   @Test
+  public void testSCMInitializationReturnCode() throws IOException {
+ExitUtil.disableSystemExit();
+OzoneConfiguration conf = new OzoneConfiguration();
+conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+// Set invalid args
+String[] invalidArgs = {"--zxcvbnm"};
+exception.expect(ExitUtil.ExitException.class);
+exception.expectMessage("ExitException");
+StorageContainerManager.createSCM(invalidArgs, conf);
+  }
+
+  @Test
   public void testScmInfo() throws Exception {
 OzoneConfiguration conf = new OzoneConfiguration();
 final String path =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-7652. Handle AM register requests asynchronously in FederationInterceptor. Contributed by Botong Huang.

2018-10-09 Thread inigoiri
YARN-7652. Handle AM register requests asynchronously in FederationInterceptor. 
Contributed by Botong Huang.

(cherry picked from commit c3d22d3b4569b7f87af4ee4abfcc284deebe90de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0900ad3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0900ad3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0900ad3

Branch: refs/heads/branch-2
Commit: b0900ad31000be6491127f4b5ef781a0ec423645
Parents: fa4a111
Author: Inigo Goiri 
Authored: Tue Oct 9 10:29:40 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 9 10:30:42 2018 -0700

--
 .../yarn/server/AMHeartbeatRequestHandler.java  |  10 +-
 .../hadoop/yarn/server/AMRMClientRelayer.java   |   6 +-
 .../server/uam/UnmanagedApplicationManager.java |  20 +-
 .../yarn/server/MockResourceManagerFacade.java  |  29 +-
 .../uam/TestUnmanagedApplicationManager.java|  51 +++-
 .../amrmproxy/FederationInterceptor.java| 291 +++
 .../amrmproxy/TestFederationInterceptor.java|  16 +-
 .../TestableFederationInterceptor.java  |  26 +-
 8 files changed, 217 insertions(+), 232 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0900ad3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
index 380c216..1534354 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
@@ -59,7 +59,7 @@ public class AMHeartbeatRequestHandler extends Thread {
   private int lastResponseId;
 
   public AMHeartbeatRequestHandler(Configuration conf,
-  ApplicationId applicationId) {
+  ApplicationId applicationId, AMRMClientRelayer rmProxyRelayer) {
 super("AMHeartbeatRequestHandler Heartbeat Handler Thread");
 this.setUncaughtExceptionHandler(
 new HeartBeatThreadUncaughtExceptionHandler());
@@ -69,6 +69,7 @@ public class AMHeartbeatRequestHandler extends Thread {
 this.conf = conf;
 this.applicationId = applicationId;
 this.requestQueue = new LinkedBlockingQueue<>();
+this.rmProxyRelayer = rmProxyRelayer;
 
 resetLastResponseId();
   }
@@ -157,13 +158,6 @@ public class AMHeartbeatRequestHandler extends Thread {
   }
 
   /**
-   * Set the AMRMClientRelayer for RM connection.
-   */
-  public void setAMRMClientRelayer(AMRMClientRelayer relayer) {
-this.rmProxyRelayer = relayer;
-  }
-
-  /**
* Set the UGI for RM connection.
*/
   public void setUGI(UserGroupInformation ugi) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0900ad3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
index 790147c..5c7f7a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
@@ -181,7 +181,11 @@ public class AMRMClientRelayer extends AbstractService
 this.amRegistrationRequest = registerRequest;
   }
 
-  public void setRMClient(ApplicationMasterProtocol client){
+  public String getRMIdentifier() {
+return this.rmId;
+  }
+
+  public void setRMClient(ApplicationMasterProtocol client) {
 this.rmClient = client;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0900ad3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
--
diff --git 

[1/2] hadoop git commit: YARN-7652. Handle AM register requests asynchronously in FederationInterceptor. Contributed by Botong Huang.

2018-10-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fa4a11103 -> b0900ad31
  refs/heads/trunk a23ea68b9 -> c3d22d3b4


YARN-7652. Handle AM register requests asynchronously in FederationInterceptor. 
Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3d22d3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3d22d3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3d22d3b

Branch: refs/heads/trunk
Commit: c3d22d3b4569b7f87af4ee4abfcc284deebe90de
Parents: a23ea68
Author: Inigo Goiri 
Authored: Tue Oct 9 10:29:40 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 9 10:29:40 2018 -0700

--
 .../yarn/server/AMHeartbeatRequestHandler.java  |  10 +-
 .../hadoop/yarn/server/AMRMClientRelayer.java   |   6 +-
 .../server/uam/UnmanagedApplicationManager.java |  20 +-
 .../yarn/server/MockResourceManagerFacade.java  |  29 +-
 .../uam/TestUnmanagedApplicationManager.java|  51 +++-
 .../amrmproxy/FederationInterceptor.java| 291 +++
 .../amrmproxy/TestFederationInterceptor.java|  16 +-
 .../TestableFederationInterceptor.java  |  26 +-
 8 files changed, 217 insertions(+), 232 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3d22d3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
index 380c216..1534354 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
@@ -59,7 +59,7 @@ public class AMHeartbeatRequestHandler extends Thread {
   private int lastResponseId;
 
   public AMHeartbeatRequestHandler(Configuration conf,
-  ApplicationId applicationId) {
+  ApplicationId applicationId, AMRMClientRelayer rmProxyRelayer) {
 super("AMHeartbeatRequestHandler Heartbeat Handler Thread");
 this.setUncaughtExceptionHandler(
 new HeartBeatThreadUncaughtExceptionHandler());
@@ -69,6 +69,7 @@ public class AMHeartbeatRequestHandler extends Thread {
 this.conf = conf;
 this.applicationId = applicationId;
 this.requestQueue = new LinkedBlockingQueue<>();
+this.rmProxyRelayer = rmProxyRelayer;
 
 resetLastResponseId();
   }
@@ -157,13 +158,6 @@ public class AMHeartbeatRequestHandler extends Thread {
   }
 
   /**
-   * Set the AMRMClientRelayer for RM connection.
-   */
-  public void setAMRMClientRelayer(AMRMClientRelayer relayer) {
-this.rmProxyRelayer = relayer;
-  }
-
-  /**
* Set the UGI for RM connection.
*/
   public void setUGI(UserGroupInformation ugi) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3d22d3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
index ca045d1..dc66868 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
@@ -186,7 +186,11 @@ public class AMRMClientRelayer extends AbstractService
 this.amRegistrationRequest = registerRequest;
   }
 
-  public void setRMClient(ApplicationMasterProtocol client){
+  public String getRMIdentifier() {
+return this.rmId;
+  }
+
+  public void setRMClient(ApplicationMasterProtocol client) {
 this.rmClient = client;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3d22d3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
--
diff --git 

hadoop git commit: YARN-8852. Add documentation for submarine installation details. (Zac Zhou via wangda)

2018-10-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 f6a73d181 -> 8e8b74872


YARN-8852. Add documentation for submarine installation details. (Zac Zhou via 
wangda)

Change-Id: If5681d1ef37ff5dc916735eeef15a6120173d653
(cherry picked from commit a23ea68b9747eae9b176f908bb04b76d30fe3795)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e8b7487
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e8b7487
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e8b7487

Branch: refs/heads/branch-3.2
Commit: 8e8b74872498077a1c3568b5f55b4e215669bad4
Parents: f6a73d1
Author: Wangda Tan 
Authored: Tue Oct 9 10:18:00 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 9 10:19:16 2018 -0700

--
 .../src/site/markdown/Index.md  |   4 +
 .../src/site/markdown/InstallationGuide.md  | 760 +++
 .../markdown/InstallationGuideChineseVersion.md | 757 ++
 3 files changed, 1521 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e8b7487/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
index 0b78a87..0006f6c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
@@ -40,3 +40,7 @@ Click below contents if you want to understand more.
 - [How to write Dockerfile for Submarine jobs](WriteDockerfile.html)
 
 - [Developer guide](DeveloperGuide.html)
+
+- [Installation guide](InstallationGuide.html)
+
+- [Installation guide Chinese version](InstallationGuideChineseVersion.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e8b7487/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
new file mode 100644
index 000..d4f4269
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
@@ -0,0 +1,760 @@
+
+
+# Submarine Installation Guide
+
+## Prerequisites
+
+### Operating System
+
+The operating system and kernel versions we used are as shown in the following 
table, which should be minimum required versions:
+
+| Environment | Version |
+| -- | -- |
+| Operating System | centos-release-7-3.1611.el7.centos.x86_64 |
+| Kernel | 3.10.0-514.el7.x86_64 |
+
+### User & Group
+
+Some specific users and groups need to be created to install 
hadoop/docker. Please create them if they are missing.
+
+```
+adduser hdfs
+adduser mapred
+adduser yarn
+addgroup hadoop
+usermod -aG hdfs,hadoop hdfs
+usermod -aG mapred,hadoop mapred
+usermod -aG yarn,hadoop yarn
+usermod -aG hdfs,hadoop hadoop
+groupadd docker
+usermod -aG docker yarn
+usermod -aG docker hadoop
+```
+
+### GCC Version
+
+Check the version of GCC tool
+
+```bash
+gcc --version
+gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-11)
+# install if needed
+yum install gcc make g++
+```
+
+### Kernel header & Kernel devel
+
+```bash
+# Approach 1:
+yum install kernel-devel-$(uname -r) kernel-headers-$(uname -r)
+# Approach 2:
+wget 
http://vault.centos.org/7.3.1611/os/x86_64/Packages/kernel-headers-3.10.0-514.el7.x86_64.rpm
+rpm -ivh kernel-headers-3.10.0-514.el7.x86_64.rpm
+```
+
+### GPU Servers
+
+```
+lspci | grep -i nvidia
+
+# If the server has gpus, you can get info like this:
+04:00.0 3D controller: NVIDIA Corporation Device 1b38 (rev a1)
+82:00.0 3D controller: NVIDIA Corporation Device 1b38 (rev a1)
+```
+
+
+
+### Nvidia Driver Installation
+
+If the nvidia driver/cuda has been installed before, they should be uninstalled 
first.
+
+```
+# uninstall cuda:
+sudo /usr/local/cuda-10.0/bin/uninstall_cuda_10.0.pl
+
+# uninstall nvidia-driver:
+sudo /usr/bin/nvidia-uninstall
+```
+
+To check GPU version, install nvidia-detect
+
+```
+yum install nvidia-detect
+# run 'nvidia-detect -v' to get the required nvidia driver version:
+nvidia-detect -v
+Probing for supported NVIDIA devices...
+[10de:13bb] NVIDIA Corporation GM107GL [Quadro K620]
+This device requires the current 390.87 

hadoop git commit: YARN-8852. Add documentation for submarine installation details. (Zac Zhou via wangda)

2018-10-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 600438bcd -> a23ea68b9


YARN-8852. Add documentation for submarine installation details. (Zac Zhou via 
wangda)

Change-Id: If5681d1ef37ff5dc916735eeef15a6120173d653


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a23ea68b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a23ea68b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a23ea68b

Branch: refs/heads/trunk
Commit: a23ea68b9747eae9b176f908bb04b76d30fe3795
Parents: 600438b
Author: Wangda Tan 
Authored: Tue Oct 9 10:18:00 2018 -0700
Committer: Wangda Tan 
Committed: Tue Oct 9 10:18:27 2018 -0700

--
 .../src/site/markdown/Index.md  |   4 +
 .../src/site/markdown/InstallationGuide.md  | 760 +++
 .../markdown/InstallationGuideChineseVersion.md | 757 ++
 3 files changed, 1521 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ea68b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
index 0b78a87..0006f6c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/Index.md
@@ -40,3 +40,7 @@ Click below contents if you want to understand more.
 - [How to write Dockerfile for Submarine jobs](WriteDockerfile.html)
 
 - [Developer guide](DeveloperGuide.html)
+
+- [Installation guide](InstallationGuide.html)
+
+- [Installation guide Chinese version](InstallationGuideChineseVersion.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ea68b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
new file mode 100644
index 000..d4f4269
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/site/markdown/InstallationGuide.md
@@ -0,0 +1,760 @@
+
+
+# Submarine Installation Guide
+
+## Prerequisites
+
+### Operating System
+
+The operating system and kernel versions we used are as shown in the following 
table, which should be minimum required versions:
+
+| Environment | Version |
+| -- | -- |
+| Operating System | centos-release-7-3.1611.el7.centos.x86_64 |
+| Kernel | 3.10.0-514.el7.x86_64 |
+
+### User & Group
+
+Some specific users and groups need to be created to install 
hadoop/docker. Please create them if they are missing.
+
+```
+adduser hdfs
+adduser mapred
+adduser yarn
+addgroup hadoop
+usermod -aG hdfs,hadoop hdfs
+usermod -aG mapred,hadoop mapred
+usermod -aG yarn,hadoop yarn
+usermod -aG hdfs,hadoop hadoop
+groupadd docker
+usermod -aG docker yarn
+usermod -aG docker hadoop
+```
+
+### GCC Version
+
+Check the version of GCC tool
+
+```bash
+gcc --version
+gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-11)
+# install if needed
+yum install gcc make g++
+```
+
+### Kernel header & Kernel devel
+
+```bash
+# Approach 1:
+yum install kernel-devel-$(uname -r) kernel-headers-$(uname -r)
+# Approach 2:
+wget 
http://vault.centos.org/7.3.1611/os/x86_64/Packages/kernel-headers-3.10.0-514.el7.x86_64.rpm
+rpm -ivh kernel-headers-3.10.0-514.el7.x86_64.rpm
+```
+
+### GPU Servers
+
+```
+lspci | grep -i nvidia
+
+# If the server has gpus, you can get info like this:
+04:00.0 3D controller: NVIDIA Corporation Device 1b38 (rev a1)
+82:00.0 3D controller: NVIDIA Corporation Device 1b38 (rev a1)
+```
+
+
+
+### Nvidia Driver Installation
+
+If the nvidia driver/cuda has been installed before, they should be uninstalled 
first.
+
+```
+# uninstall cuda:
+sudo /usr/local/cuda-10.0/bin/uninstall_cuda_10.0.pl
+
+# uninstall nvidia-driver:
+sudo /usr/bin/nvidia-uninstall
+```
+
+To check GPU version, install nvidia-detect
+
+```
+yum install nvidia-detect
+# run 'nvidia-detect -v' to get the required nvidia driver version:
+nvidia-detect -v
+Probing for supported NVIDIA devices...
+[10de:13bb] NVIDIA Corporation GM107GL [Quadro K620]
+This device requires the current 390.87 NVIDIA driver kmod-nvidia
+[8086:1912] Intel Corporation HD Graphics 530
+An Intel 

hadoop git commit: HDDS-602. Bump Ozone version to 0.4.0-SNAPSHOT after ozone-0.3 branch cut.

2018-10-09 Thread elek
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6f11919be -> 600438bcd


HDDS-602. Bump Ozone version to 0.4.0-SNAPSHOT after ozone-0.3 branch cut.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/600438bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/600438bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/600438bc

Branch: refs/heads/trunk
Commit: 600438bcde7f4a052c1f6447e51ae49a7c3a0e12
Parents: 6f11919
Author: Márton Elek 
Authored: Tue Oct 9 19:04:04 2018 +0200
Committer: Márton Elek 
Committed: Tue Oct 9 19:04:25 2018 +0200

--
 hadoop-hdds/client/pom.xml   | 4 ++--
 hadoop-hdds/common/pom.xml   | 6 +++---
 hadoop-hdds/container-service/pom.xml| 4 ++--
 hadoop-hdds/framework/pom.xml| 4 ++--
 hadoop-hdds/pom.xml  | 2 +-
 hadoop-hdds/server-scm/pom.xml   | 4 ++--
 hadoop-hdds/tools/pom.xml| 4 ++--
 hadoop-ozone/client/pom.xml  | 4 ++--
 hadoop-ozone/common/pom.xml  | 4 ++--
 hadoop-ozone/datanode/pom.xml| 4 ++--
 hadoop-ozone/dist/pom.xml| 4 ++--
 hadoop-ozone/docs/pom.xml| 4 ++--
 hadoop-ozone/integration-test/pom.xml| 4 ++--
 hadoop-ozone/objectstore-service/pom.xml | 4 ++--
 hadoop-ozone/ozone-manager/pom.xml   | 4 ++--
 hadoop-ozone/ozonefs/pom.xml | 4 ++--
 hadoop-ozone/pom.xml | 6 +++---
 hadoop-ozone/s3gateway/pom.xml   | 4 ++--
 hadoop-ozone/tools/pom.xml   | 4 ++--
 pom.xml  | 2 +-
 20 files changed, 40 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/600438bc/hadoop-hdds/client/pom.xml
--
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index 53d72bb..38f1f59 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.3.0-SNAPSHOT
+0.4.0-SNAPSHOT
   
 
   hadoop-hdds-client
-  0.3.0-SNAPSHOT
+  0.4.0-SNAPSHOT
   Apache Hadoop Distributed Data Store Client 
Library
   Apache Hadoop HDDS Client
   jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/600438bc/hadoop-hdds/common/pom.xml
--
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 64ebe9b..eea2264 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -20,16 +20,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.3.0-SNAPSHOT
+0.4.0-SNAPSHOT
   
   hadoop-hdds-common
-  0.3.0-SNAPSHOT
+  0.4.0-SNAPSHOT
   Apache Hadoop Distributed Data Store Common
   Apache Hadoop HDDS Common
   jar
 
   
-    <hdds.version>0.3.0-SNAPSHOT</hdds.version>
+    <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 2.11.0
 3.4.2
 ${hdds.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/600438bc/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index 54c5fad..0df5893 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.3.0-SNAPSHOT
+0.4.0-SNAPSHOT
   
   hadoop-hdds-container-service
-  0.3.0-SNAPSHOT
+  0.4.0-SNAPSHOT
   Apache Hadoop Distributed Data Store Container 
Service
   Apache Hadoop HDDS Container Service
   jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/600438bc/hadoop-hdds/framework/pom.xml
--
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 511f321..650442d 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 org.apache.hadoop
 hadoop-hdds
-0.3.0-SNAPSHOT
+0.4.0-SNAPSHOT
   
   hadoop-hdds-server-framework
-  0.3.0-SNAPSHOT
+  0.4.0-SNAPSHOT
   Apache Hadoop Distributed Data Store Server 
Framework
   Apache Hadoop HDDS Server Framework
   jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/600438bc/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index f9a6b57..a5fb32d 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -25,7 +25,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
   hadoop-hdds
-  0.3.0-SNAPSHOT
+  0.4.0-SNAPSHOT
   Apache Hadoop 

hadoop git commit: HDDS-450. Generate BlockCommitSequenceId in ContainerStateMachine for every commit operation in Ratis. Contributed by Shashikant Banerjee.

2018-10-09 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 4ba6dd460 -> abfd4ffdc


HDDS-450. Generate BlockCommitSequenceId in ContainerStateMachine for every 
commit operation in Ratis. Contributed by Shashikant Banerjee.

(cherry picked from commit 7367ff333bf332b300e0acd6e7501ce8139a1998)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abfd4ffd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abfd4ffd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abfd4ffd

Branch: refs/heads/ozone-0.3
Commit: abfd4ffdc90ada2c44fd1e563a12a9cf574f7b5b
Parents: 4ba6dd4
Author: Nanda kumar 
Authored: Tue Oct 9 18:07:01 2018 +0530
Committer: Nanda kumar 
Committed: Tue Oct 9 22:31:19 2018 +0530

--
 .../hdds/scm/storage/ChunkOutputStream.java | 12 +++-
 .../scm/storage/ContainerProtocolCalls.java | 25 
 .../container/common/helpers/BlockData.java | 12 
 .../main/proto/DatanodeContainerProtocol.proto  |  2 ++
 .../server/ratis/ContainerStateMachine.java | 31 
 .../container/keyvalue/helpers/BlockUtils.java  |  5 +++-
 .../keyvalue/impl/BlockManagerImpl.java |  1 -
 .../ozone/client/io/ChunkGroupOutputStream.java | 16 +++---
 .../ozone/om/helpers/OmKeyLocationInfo.java | 17 +--
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 +
 .../TestGetCommittedBlockLengthAndPutKey.java   |  4 ++-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  2 ++
 12 files changed, 99 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abfd4ffd/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 10b3bb5..cc1ea8d 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -65,6 +66,7 @@ public class ChunkOutputStream extends OutputStream {
   private final String streamId;
   private int chunkIndex;
   private int chunkSize;
+  private long blockCommitSequenceId;
 
   /**
* Creates a new ChunkOutputStream.
@@ -93,12 +95,17 @@ public class ChunkOutputStream extends OutputStream {
 this.buffer = ByteBuffer.allocate(chunkSize);
 this.streamId = UUID.randomUUID().toString();
 this.chunkIndex = 0;
+blockCommitSequenceId = 0;
   }
 
   public ByteBuffer getBuffer() {
 return buffer;
   }
 
+  public long getBlockCommitSequenceId() {
+return blockCommitSequenceId;
+  }
+
   @Override
   public void write(int b) throws IOException {
 checkOpen();
@@ -155,7 +162,10 @@ public class ChunkOutputStream extends OutputStream {
 writeChunkToContainer();
   }
   try {
-putBlock(xceiverClient, containerBlockData.build(), traceID);
+ContainerProtos.PutBlockResponseProto responseProto =
+putBlock(xceiverClient, containerBlockData.build(), traceID);
+blockCommitSequenceId =
+responseProto.getCommittedBlockLength().getBlockCommitSequenceId();
   } catch (IOException e) {
 throw new IOException(
 "Unexpected Storage Container Exception: " + e.toString(), e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abfd4ffd/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 6b7a328..1df50b1 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -141,24 +141,23 @@ public final class ContainerProtocolCalls  {
* @param xceiverClient client to perform call
* @param containerBlockData block data to identify container
* @param traceID container protocol call args
+   * @return putBlockResponse
* @throws IOException if there is an I/O error while 

hadoop git commit: HDDS-599. Fix TestOzoneConfiguration TestOzoneConfigurationFields. Contributed by Sandeep Nemuri.

2018-10-09 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 347ea3858 -> 4ba6dd460


HDDS-599. Fix TestOzoneConfiguration TestOzoneConfigurationFields. Contributed 
by Sandeep Nemuri.

(cherry picked from commit 9e9915ddab7ff79224b075889f67c38920fde9f0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ba6dd46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ba6dd46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ba6dd46

Branch: refs/heads/ozone-0.3
Commit: 4ba6dd46052c0eb34a97a154b1d448245a6184ee
Parents: 347ea38
Author: Nanda kumar 
Authored: Tue Oct 9 18:23:05 2018 +0530
Committer: Nanda kumar 
Committed: Tue Oct 9 22:30:31 2018 +0530

--
 .../common/src/main/resources/ozone-default.xml | 21 
 1 file changed, 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ba6dd46/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d7cbd75..aee471e 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1106,27 +1106,6 @@
   
 
   
-hdds.write.lock.reporting.threshold.ms
-5000
-OZONE, DATANODE, MANAGEMENT
-
-  When a write lock is held for a long time, this will be logged as the
-  lock is released. This sets how long the lock must be held for logging
-  to occur.
-
-  
-
-  
-hdds.lock.suppress.warning.interval.ms
-1
-OZONE, DATANODE, MANAGEMENT
-
-  Instrumentation reporting long critical sections will suppress
-  consecutive warnings within this interval.
-
-  
-
-  
 hdds.command.status.report.interval
 30s
 OZONE, DATANODE, MANAGEMENT


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8853. [UI2] Application Attempts tab is not shown correctly when there are no attempts. Contributed by Akhil PB.

2018-10-09 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 ce4a0898d -> be0ce74c7


YARN-8853. [UI2] Application Attempts tab is not shown correctly when there are 
no attempts. Contributed by Akhil PB.

(cherry picked from commit 6f11919beb71f3bab7a49f2866c03bca5a4afc3b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be0ce74c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be0ce74c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be0ce74c

Branch: refs/heads/branch-3.1
Commit: be0ce74c7137726742910f1bfe3dbc5ec6bca9e9
Parents: ce4a089
Author: Sunil G 
Authored: Tue Oct 9 22:04:17 2018 +0530
Committer: Sunil G 
Committed: Tue Oct 9 22:06:21 2018 +0530

--
 .../src/main/webapp/app/templates/yarn-app/attempts.hbs| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be0ce74c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
index 62105d3..4aa47c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
@@ -30,7 +30,7 @@
 viewType=viewType
   }}
 {{else}}
-  
+  
 
   
  Application Attempts


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8853. [UI2] Application Attempts tab is not shown correctly when there are no attempts. Contributed by Akhil PB.

2018-10-09 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 71faacd17 -> f6a73d181


YARN-8853. [UI2] Application Attempts tab is not shown correctly when there are 
no attempts. Contributed by Akhil PB.

(cherry picked from commit 6f11919beb71f3bab7a49f2866c03bca5a4afc3b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6a73d18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6a73d18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6a73d18

Branch: refs/heads/branch-3.2
Commit: f6a73d181ff86496c3fe7512422f8f8100720855
Parents: 71faacd
Author: Sunil G 
Authored: Tue Oct 9 22:04:17 2018 +0530
Committer: Sunil G 
Committed: Tue Oct 9 22:05:53 2018 +0530

--
 .../src/main/webapp/app/templates/yarn-app/attempts.hbs| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6a73d18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
index 62105d3..4aa47c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
@@ -30,7 +30,7 @@
 viewType=viewType
   }}
 {{else}}
-  
+  
 
   
  Application Attempts


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8853. [UI2] Application Attempts tab is not shown correctly when there are no attempts. Contributed by Akhil PB.

2018-10-09 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9e9915dda -> 6f11919be


YARN-8853. [UI2] Application Attempts tab is not shown correctly when there are 
no attempts. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f11919b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f11919b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f11919b

Branch: refs/heads/trunk
Commit: 6f11919beb71f3bab7a49f2866c03bca5a4afc3b
Parents: 9e9915d
Author: Sunil G 
Authored: Tue Oct 9 22:04:17 2018 +0530
Committer: Sunil G 
Committed: Tue Oct 9 22:04:17 2018 +0530

--
 .../src/main/webapp/app/templates/yarn-app/attempts.hbs| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f11919b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
index 62105d3..4aa47c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
@@ -30,7 +30,7 @@
 viewType=viewType
   }}
 {{else}}
-  
+  
 
   
  Application Attempts


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-8468. Enable the use of queue based maximum container allocation limit and implement it in FairScheduler. Contributed by Antal Bálint Steinbach.

2018-10-09 Thread wwei
YARN-8468. Enable the use of queue based maximum container allocation limit and 
implement it in FairScheduler. Contributed by Antal Bálint Steinbach.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce4a0898
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce4a0898
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce4a0898

Branch: refs/heads/branch-3.1
Commit: ce4a0898df5e9891f0388aa65a754f0ff85b56ac
Parents: 665036c
Author: Weiwei Yang 
Authored: Tue Oct 9 22:30:42 2018 +0800
Committer: Weiwei Yang 
Committed: Tue Oct 9 22:30:42 2018 +0800

--
 .../server/resourcemanager/RMAppManager.java|  13 +-
 .../server/resourcemanager/RMServerUtils.java   |   9 +-
 .../scheduler/AbstractYarnScheduler.java|  21 +-
 .../scheduler/SchedulerUtils.java   |  62 ++
 .../scheduler/YarnScheduler.java|   9 +-
 .../scheduler/capacity/CapacityScheduler.java   |   7 +-
 .../processor/PlacementConstraintProcessor.java |  12 +-
 .../scheduler/fair/AllocationConfiguration.java |  13 ++
 .../scheduler/fair/FSLeafQueue.java |  10 +
 .../scheduler/fair/FSParentQueue.java   |  15 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   7 +
 .../scheduler/fair/FairScheduler.java   |  52 +++--
 .../allocation/AllocationFileQueueParser.java   |   7 +
 .../fair/allocation/QueueProperties.java|  16 +-
 .../webapp/FairSchedulerPage.java   |   4 +
 .../webapp/dao/FairSchedulerQueueInfo.java  |   9 +-
 .../resourcemanager/AppManagerTestBase.java | 107 ++
 .../yarn/server/resourcemanager/MockRM.java |   8 +
 .../server/resourcemanager/TestAppManager.java  |  21 +-
 .../TestAppManagerWithFairScheduler.java| 175 +++
 .../resourcemanager/TestClientRMService.java| 104 -
 .../resourcemanager/TestRMServerUtils.java  |  98 -
 .../scheduler/TestSchedulerUtils.java   | 214 ++-
 .../capacity/TestCapacityScheduler.java |  39 +++-
 .../scheduler/fair/FairSchedulerTestBase.java   |   4 +-
 .../fair/TestAllocationFileLoaderService.java   |  30 ++-
 .../TestApplicationMasterServiceWithFS.java | 170 +++
 .../scheduler/fair/TestFairScheduler.java   | 140 ++--
 .../allocationfile/AllocationFileQueue.java |   6 +-
 .../AllocationFileQueueBuilder.java |   6 +
 .../AllocationFileQueueProperties.java  |  12 ++
 .../src/site/markdown/FairScheduler.md  |   2 +
 32 files changed, 1140 insertions(+), 262 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce4a0898/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index c54051e..8f578bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -86,7 +87,7 @@ public class RMAppManager implements 
EventHandler,
   private int maxCompletedAppsInMemory;
   private int maxCompletedAppsInStateStore;
   protected int completedAppsInStateStore = 0;
-  private LinkedList completedApps = new 
LinkedList();
+  protected LinkedList completedApps = new 
LinkedList();
 
   private final RMContext rmContext;
   private final ApplicationMasterService masterService;
@@ -526,13 +527,13 @@ public class RMAppManager implements 
EventHandler,
 
 // Normalize all requests
 String queue = submissionContext.getQueue();
+Resource maxAllocation = scheduler.getMaximumResourceCapability(queue);
 for (ResourceRequest amReq : amReqs) {
-  

[1/2] hadoop git commit: YARN-8468. Enable the use of queue based maximum container allocation limit and implement it in FairScheduler. Contributed by Antal Bálint Steinbach.

2018-10-09 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 665036c5f -> ce4a0898d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce4a0898/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 15cfdb0..ec452d7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -99,6 +99,7 @@ import org.junit.Test;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import org.junit.rules.ExpectedException;
+import org.mockito.Mockito;
 
 public class TestSchedulerUtils {
 
@@ -165,12 +166,12 @@ public class TestSchedulerUtils {
   @Test (timeout = 3)
   public void testNormalizeRequest() {
 ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
-
+
 final int minMemory = 1024;
 final int maxMemory = 8192;
 Resource minResource = Resources.createResource(minMemory, 0);
 Resource maxResource = Resources.createResource(maxMemory, 0);
-
+
 ResourceRequest ask = new ResourceRequestPBImpl();
 
 // case negative memory
@@ -230,11 +231,11 @@ public class TestSchedulerUtils {
   @Test (timeout = 3)
   public void testNormalizeRequestWithDominantResourceCalculator() {
 ResourceCalculator resourceCalculator = new DominantResourceCalculator();
-
+
 Resource minResource = Resources.createResource(1024, 1);
 Resource maxResource = Resources.createResource(10240, 10);
 Resource clusterResource = Resources.createResource(10 * 1024, 10);
-
+
 ResourceRequest ask = new ResourceRequestPBImpl();
 
 // case negative memory/vcores
@@ -259,12 +260,12 @@ public class TestSchedulerUtils {
 assertEquals(1, ask.getCapability().getVirtualCores());
 assertEquals(2048, ask.getCapability().getMemorySize());
   }
-  
+
   @Test(timeout = 3)
   public void testValidateResourceRequestWithErrorLabelsPermission()
   throws IOException {
 // mock queue and scheduler
-YarnScheduler scheduler = mock(YarnScheduler.class);
+ResourceScheduler scheduler = mock(ResourceScheduler.class);
 Set queueAccessibleNodeLabels = Sets.newHashSet();
 QueueInfo queueInfo = mock(QueueInfo.class);
 when(queueInfo.getQueueName()).thenReturn("queue");
@@ -273,6 +274,8 @@ public class TestSchedulerUtils {
 when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean()))
 .thenReturn(queueInfo);
 
+when(rmContext.getScheduler()).thenReturn(scheduler);
+
 Resource maxResource = Resources.createResource(
 YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
 YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
@@ -291,20 +294,20 @@ public class TestSchedulerUtils {
   ResourceRequest resReq = BuilderUtils.newResourceRequest(
   mock(Priority.class), ResourceRequest.ANY, resource, 1);
   resReq.setNodeLabelExpression("x");
-  SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-  scheduler, rmContext);
+  normalizeAndvalidateRequest(resReq, "queue",
+  scheduler, rmContext, maxResource);
 
   resReq.setNodeLabelExpression("y");
-  SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-  scheduler, rmContext);
-  
+  normalizeAndvalidateRequest(resReq, "queue",
+  scheduler, rmContext, maxResource);
+
   resReq.setNodeLabelExpression("");
-  SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-  scheduler, rmContext);
-  
+  normalizeAndvalidateRequest(resReq, "queue",
+  scheduler, rmContext, maxResource);
+
   resReq.setNodeLabelExpression(" ");
-  SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-  scheduler, rmContext);
+  normalizeAndvalidateRequest(resReq, "queue",
+  scheduler, rmContext, maxResource);
 } catch (InvalidResourceRequestException e) {
   e.printStackTrace();
   fail("Should be valid when request labels is a subset of queue labels");
@@ -312,7 +315,7 @@ public class TestSchedulerUtils {
   

hadoop git commit: HDDS-599. Fix TestOzoneConfiguration TestOzoneConfigurationFields. Contributed by Sandeep Nemuri.

2018-10-09 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7367ff333 -> 9e9915dda


HDDS-599. Fix TestOzoneConfiguration TestOzoneConfigurationFields. Contributed 
by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e9915dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e9915dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e9915dd

Branch: refs/heads/trunk
Commit: 9e9915ddab7ff79224b075889f67c38920fde9f0
Parents: 7367ff3
Author: Nanda kumar 
Authored: Tue Oct 9 18:23:05 2018 +0530
Committer: Nanda kumar 
Committed: Tue Oct 9 18:23:05 2018 +0530

--
 .../common/src/main/resources/ozone-default.xml | 21 
 1 file changed, 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e9915dd/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d7cbd75..aee471e 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1106,27 +1106,6 @@
   
 
   
-hdds.write.lock.reporting.threshold.ms
-5000
-OZONE, DATANODE, MANAGEMENT
-
-  When a write lock is held for a long time, this will be logged as the
-  lock is released. This sets how long the lock must be held for logging
-  to occur.
-
-  
-
-  
-hdds.lock.suppress.warning.interval.ms
-1
-OZONE, DATANODE, MANAGEMENT
-
-  Instrumentation reporting long critical sections will suppress
-  consecutive warnings within this interval.
-
-  
-
-  
 hdds.command.status.report.interval
 30s
 OZONE, DATANODE, MANAGEMENT


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-450. Generate BlockCommitSequenceId in ContainerStateMachine for every commit operation in Ratis. Contributed by Shashikant Banerjee.

2018-10-09 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5b1cfcaef -> 7367ff333


HDDS-450. Generate BlockCommitSequenceId in ContainerStateMachine for every 
commit operation in Ratis. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7367ff33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7367ff33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7367ff33

Branch: refs/heads/trunk
Commit: 7367ff333bf332b300e0acd6e7501ce8139a1998
Parents: 5b1cfca
Author: Nanda kumar 
Authored: Tue Oct 9 18:07:01 2018 +0530
Committer: Nanda kumar 
Committed: Tue Oct 9 18:07:01 2018 +0530

--
 .../hdds/scm/storage/ChunkOutputStream.java | 12 +++-
 .../scm/storage/ContainerProtocolCalls.java | 25 
 .../container/common/helpers/BlockData.java | 12 
 .../main/proto/DatanodeContainerProtocol.proto  |  2 ++
 .../server/ratis/ContainerStateMachine.java | 31 
 .../container/keyvalue/helpers/BlockUtils.java  |  5 +++-
 .../keyvalue/impl/BlockManagerImpl.java |  1 -
 .../ozone/client/io/ChunkGroupOutputStream.java | 16 +++---
 .../ozone/om/helpers/OmKeyLocationInfo.java | 17 +--
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 +
 .../TestGetCommittedBlockLengthAndPutKey.java   |  4 ++-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  2 ++
 12 files changed, 99 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7367ff33/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 10b3bb5..cc1ea8d 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -65,6 +66,7 @@ public class ChunkOutputStream extends OutputStream {
   private final String streamId;
   private int chunkIndex;
   private int chunkSize;
+  private long blockCommitSequenceId;
 
   /**
* Creates a new ChunkOutputStream.
@@ -93,12 +95,17 @@ public class ChunkOutputStream extends OutputStream {
 this.buffer = ByteBuffer.allocate(chunkSize);
 this.streamId = UUID.randomUUID().toString();
 this.chunkIndex = 0;
+blockCommitSequenceId = 0;
   }
 
   public ByteBuffer getBuffer() {
 return buffer;
   }
 
+  public long getBlockCommitSequenceId() {
+return blockCommitSequenceId;
+  }
+
   @Override
   public void write(int b) throws IOException {
 checkOpen();
@@ -155,7 +162,10 @@ public class ChunkOutputStream extends OutputStream {
 writeChunkToContainer();
   }
   try {
-putBlock(xceiverClient, containerBlockData.build(), traceID);
+ContainerProtos.PutBlockResponseProto responseProto =
+putBlock(xceiverClient, containerBlockData.build(), traceID);
+blockCommitSequenceId =
+responseProto.getCommittedBlockLength().getBlockCommitSequenceId();
   } catch (IOException e) {
 throw new IOException(
 "Unexpected Storage Container Exception: " + e.toString(), e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7367ff33/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 6b7a328..1df50b1 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -141,24 +141,23 @@ public final class ContainerProtocolCalls  {
* @param xceiverClient client to perform call
* @param containerBlockData block data to identify container
* @param traceID container protocol call args
+   * @return putBlockResponse
* @throws IOException if there is an I/O error while performing the call
*/
-  public static void putBlock(XceiverClientSpi 

hadoop git commit: MAPREDUCE-7035. Skip javadoc build for auto-generated sources in hadoop-mapreduce-client. Contributed by Mukul Kumar Singh.

2018-10-09 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7ba1cfdea -> 5b1cfcaef


MAPREDUCE-7035. Skip javadoc build for auto-generated sources in 
hadoop-mapreduce-client. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b1cfcae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b1cfcae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b1cfcae

Branch: refs/heads/trunk
Commit: 5b1cfcaeff1bfa8fb736574f1a664da40b3317d2
Parents: 7ba1cfd
Author: Akira Ajisaka 
Authored: Tue Oct 9 21:11:49 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 9 21:13:04 2018 +0900

--
 .../hadoop-mapreduce-client-common/pom.xml   | 7 +++
 .../hadoop-mapreduce-client-core/pom.xml | 8 
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 7 +++
 3 files changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1cfcae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index 2c6e8e5..77fc45e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -80,6 +80,13 @@
   
 
   
+  
+org.apache.maven.plugins
+maven-javadoc-plugin
+
+  
org.apache.hadoop.yarn.proto:org.apache.hadoop.mapreduce.v2.proto:org.apache.hadoop.mapreduce.v2.hs.proto
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1cfcae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index bfac6a9..edd2b24 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -88,6 +88,14 @@
   
   
 org.apache.maven.plugins
+maven-javadoc-plugin
+
+  target/generated-sources/avro/
+  
org.apache.hadoop.mapreduce.jobhistory
+
+  
+  
+org.apache.maven.plugins
 maven-surefire-plugin

   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1cfcae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 9711597..a68c166 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -85,6 +85,13 @@
   
 
   
+  
+org.apache.maven.plugins
+maven-javadoc-plugin
+
+  
org.apache.hadoop.mapred.proto
+
+  
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15827. NPE in DynamoDBMetadataStore.lambda$listChildren for root + auth S3Guard. Contributed by Gabor Bota

2018-10-09 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk d4626b4d1 -> 7ba1cfdea


HADOOP-15827. NPE in DynamoDBMetadataStore.lambda$listChildren for root + auth 
S3Guard.
Contributed by Gabor Bota


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ba1cfde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ba1cfde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ba1cfde

Branch: refs/heads/trunk
Commit: 7ba1cfdea7f5daf799adc4f0ed2e45f55841a058
Parents: d4626b4
Author: Steve Loughran 
Authored: Tue Oct 9 10:46:41 2018 +0100
Committer: Steve Loughran 
Committed: Tue Oct 9 10:46:41 2018 +0100

--
 .../org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba1cfde/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 5716cfa..f34afb5 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -630,7 +630,7 @@ public class DynamoDBMetadataStore implements MetadataStore 
{
   LOG.trace("Listing table {} in region {} for {} returning {}",
   tableName, region, path, metas);
 
-  return (metas.isEmpty() && dirPathMeta == null)
+  return (metas.isEmpty() || dirPathMeta == null)
   ? null
   : new DirListingMetadata(path, metas, isAuthoritative,
   dirPathMeta.getLastUpdated());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13967. HDFS Router Quota Class Review. Contributed by BELUGA BEHR.

2018-10-09 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9bbeb5248 -> d4626b4d1


HDFS-13967. HDFS Router Quota Class Review. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4626b4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4626b4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4626b4d

Branch: refs/heads/trunk
Commit: d4626b4d1825b60ef02c0da9c45cd483d1d98f49
Parents: 9bbeb52
Author: Yiqun Lin 
Authored: Tue Oct 9 16:11:07 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Oct 9 16:11:07 2018 +0800

--
 .../hdfs/server/federation/router/Quota.java| 54 ++--
 1 file changed, 26 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4626b4d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index d8ed080..5d0309f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -33,6 +35,9 @@ import 
org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+
 /**
  * Module that implements the quota relevant RPC calls
  * {@link ClientProtocol#setQuota(String, long, long, StorageType)}
@@ -121,37 +126,31 @@ public class Quota {
 final List locations = getQuotaRemoteLocations(path);
 
 // NameService -> Locations
-Map> validLocations = new HashMap<>();
+ListMultimap validLocations =
+ArrayListMultimap.create();
+
 for (RemoteLocation loc : locations) {
-  String nsId = loc.getNameserviceId();
-  List dests = validLocations.get(nsId);
-  if (dests == null) {
-dests = new LinkedList<>();
-dests.add(loc);
-validLocations.put(nsId, dests);
-  } else {
-// Ensure the paths in the same nameservice is different.
-// Don't include parent-child paths.
-boolean isChildPath = false;
-for (RemoteLocation d : dests) {
-  if (loc.getDest().startsWith(d.getDest())) {
-isChildPath = true;
-break;
-  }
-}
+  final String nsId = loc.getNameserviceId();
+  final Collection dests = validLocations.get(nsId);
+
+  // Ensure the paths in the same nameservice is different.
+  // Do not include parent-child paths.
+  boolean isChildPath = false;
 
-if (!isChildPath) {
-  dests.add(loc);
+  for (RemoteLocation d : dests) {
+if (StringUtils.startsWith(loc.getDest(), d.getDest())) {
+  isChildPath = true;
+  break;
 }
   }
-}
 
-List quotaLocs = new LinkedList<>();
-for (List locs : validLocations.values()) {
-  quotaLocs.addAll(locs);
+  if (!isChildPath) {
+validLocations.put(nsId, loc);
+  }
 }
 
-return quotaLocs;
+return Collections
+.unmodifiableList(new ArrayList<>(validLocations.values()));
   }
 
   /**
@@ -209,7 +208,7 @@ public class Quota {
*/
   private List getQuotaRemoteLocations(String path)
   throws IOException {
-List locations = new LinkedList<>();
+List locations = new ArrayList<>();
 RouterQuotaManager manager = this.router.getQuotaManager();
 if (manager != null) {
   Set childrenPaths = manager.getPaths(path);
@@ -217,7 +216,6 @@ public class Quota {
 locations.addAll(rpcServer.getLocationsForPath(childPath, true, 
false));
   }
 }
-
 return locations;
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-588. SelfSignedCertificate#generateCertificate should sign the certificate with the configured security provider. Contributed by Xiaoyu Yao.

2018-10-09 Thread ajay
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-4 81092150a -> e180547b4


HDDS-588. SelfSignedCertificate#generateCertificate should sign the certificate 
with the configured security provider. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e180547b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e180547b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e180547b

Branch: refs/heads/HDDS-4
Commit: e180547b4a9abd2190c8711d289c0557b624e2c7
Parents: 8109215
Author: Ajay Kumar 
Authored: Tue Oct 9 00:28:01 2018 -0700
Committer: Ajay Kumar 
Committed: Tue Oct 9 00:28:01 2018 -0700

--
 .../hdds/security/x509/certificates/SelfSignedCertificate.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e180547b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
index fef7ac3..f221246 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/SelfSignedCertificate.java
@@ -103,8 +103,8 @@ public final class SelfSignedCertificate {
 
 
 ContentSigner contentSigner =
-new JcaContentSignerBuilder(
-config.getSignatureAlgo()).build(key.getPrivate());
+new JcaContentSignerBuilder(config.getSignatureAlgo())
+.setProvider(config.getProvider()).build(key.getPrivate());
 
 // Please note: Since this is a root certificate we use "ONE" as the
 // serial number. Also note that skip enforcing locale or UTC. We are


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org