[28/49] hadoop git commit: YARN-8902. [CSI] Add volume manager that manages CSI volume lifecycle. Contributed by Weiwei Yang.

2018-11-12 Thread brahma
YARN-8902. [CSI] Add volume manager that manages CSI volume lifecycle. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e728444
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e728444
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e728444

Branch: refs/heads/HDFS-13891
Commit: 4e7284443e6cf8dac3cd5d2581730c87ae6ffb55
Parents: b5ec85d
Author: Sunil G 
Authored: Mon Nov 12 11:57:02 2018 +0530
Committer: Sunil G 
Committed: Mon Nov 12 11:57:02 2018 +0530

--
 .../volume/csi/CsiAdaptorClientProtocol.java|  34 +++
 .../yarn/server/volume/csi/CsiConstants.java|  37 +++
 .../volume/csi/VolumeCapabilityRange.java   | 107 
 .../hadoop/yarn/server/volume/csi/VolumeId.java |  59 +
 .../yarn/server/volume/csi/VolumeMetaData.java  | 227 +
 .../csi/exception/InvalidVolumeException.java   |  28 +++
 .../volume/csi/exception/VolumeException.java   |  34 +++
 .../exception/VolumeProvisioningException.java  |  32 +++
 .../volume/csi/exception/package-info.java  |  27 ++
 .../yarn/server/volume/csi/package-info.java|  27 ++
 .../resourcemanager/RMActiveServiceContext.java |  14 ++
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 +
 .../server/resourcemanager/ResourceManager.java |  14 ++
 .../volume/csi/CsiAdaptorClient.java|  36 +++
 .../volume/csi/VolumeBuilder.java   | 106 
 .../volume/csi/VolumeManager.java   |  63 +
 .../volume/csi/VolumeManagerImpl.java   | 108 
 .../volume/csi/VolumeStates.java|  60 +
 .../csi/event/ControllerPublishVolumeEvent.java |  30 +++
 .../volume/csi/event/ValidateVolumeEvent.java   |  30 +++
 .../volume/csi/event/VolumeEvent.java   |  43 
 .../volume/csi/event/VolumeEventType.java   |  29 +++
 .../volume/csi/event/package-info.java  |  27 ++
 .../volume/csi/lifecycle/Volume.java|  37 +++
 .../volume/csi/lifecycle/VolumeImpl.java| 199 +++
 .../volume/csi/lifecycle/VolumeState.java   |  35 +++
 .../volume/csi/lifecycle/package-info.java  |  27 ++
 .../volume/csi/package-info.java|  27 ++
 .../csi/processor/VolumeAMSProcessor.java   | 158 
 .../volume/csi/processor/package-info.java  |  27 ++
 .../csi/provisioner/VolumeProvisioner.java  |  32 +++
 .../provisioner/VolumeProvisioningResults.java  |  87 +++
 .../csi/provisioner/VolumeProvisioningTask.java |  66 +
 .../volume/csi/provisioner/package-info.java|  27 ++
 .../resourcemanager/volume/package-info.java|  27 ++
 .../volume/csi/TestVolumeCapabilityRange.java   |  67 +
 .../volume/csi/TestVolumeLifecycle.java | 161 
 .../volume/csi/TestVolumeMetaData.java  | 178 +
 .../volume/csi/TestVolumeProcessor.java | 250 +++
 40 files changed, 2594 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e728444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
new file mode 100644
index 0000000..b894d4e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.volume.csi;
+
+import 

[34/49] hadoop git commit: HDDS-576. Move ContainerWithPipeline creation to RPC endpoint. Contributed by Nanda kumar.

2018-11-12 Thread brahma
HDDS-576. Move ContainerWithPipeline creation to RPC endpoint.
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18fe65d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18fe65d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18fe65d7

Branch: refs/heads/HDFS-13891
Commit: 18fe65d7560b0bd61e3cf3ffbbaf98e87d82120f
Parents: 42f3a70
Author: Nanda kumar 
Authored: Mon Nov 12 23:32:31 2018 +0530
Committer: Nanda kumar 
Committed: Mon Nov 12 23:33:11 2018 +0530

--
 .../hadoop/hdds/scm/container/ContainerID.java  |   2 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  24 ++--
 .../CloseContainerCommandHandler.java   |   2 +-
 .../protocol/commands/CommandForDatanode.java   |   1 +
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  77 ++--
 .../block/DatanodeDeletedBlockTransactions.java |  40 +++
 .../hdds/scm/block/DeletedBlockLogImpl.java |  24 ++--
 .../container/CloseContainerEventHandler.java   | 116 +--
 .../hdds/scm/container/ContainerManager.java|  20 +---
 .../scm/container/ContainerStateManager.java|   4 +-
 .../hdds/scm/container/SCMContainerManager.java |  63 +-
 .../scm/server/SCMClientProtocolServer.java |  32 -
 .../scm/server/StorageContainerManager.java |   6 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |   2 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   2 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |  55 +
 .../TestCloseContainerEventHandler.java |  35 +++---
 .../container/TestContainerReportHandler.java   |   6 +-
 .../scm/container/TestSCMContainerManager.java  |  65 +--
 .../hdds/scm/node/TestContainerPlacement.java   |   7 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   6 +-
 .../hdds/scm/pipeline/TestNodeFailure.java  |  37 +++---
 .../hdds/scm/pipeline/TestPipelineClose.java|  10 +-
 .../hdds/scm/pipeline/TestSCMRestart.java   |  19 +--
 .../org/apache/hadoop/ozone/OzoneTestUtils.java |   5 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   7 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  31 +++--
 .../ozone/client/rpc/TestOzoneRpcClient.java|  21 ++--
 .../TestCloseContainerByPipeline.java   |  19 +--
 .../TestCloseContainerHandler.java  |  19 +--
 .../freon/TestFreonWithDatanodeFastRestart.java |   8 ++
 .../freon/TestFreonWithDatanodeRestart.java |   5 +
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   2 +-
 33 files changed, 381 insertions(+), 391 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fe65d7/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index e7ac350..bb44da4 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -102,6 +102,6 @@ public final class ContainerID implements 
Comparable<ContainerID> {
 
   @Override
   public String toString() {
-return "id=" + id;
+return "#" + id;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fe65d7/hadoop-hdds/common/src/main/proto/hdds.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto 
b/hadoop-hdds/common/src/main/proto/hdds.proto
index a0c6f16..c37683a 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -111,24 +111,18 @@ message NodePool {
  */
 
 enum LifeCycleState {
-ALLOCATED = 1;
-CREATING = 2; // Used for container allocated/created by different client.
-OPEN =3; // Mostly an update to SCM via HB or client call.
-CLOSING = 4;
-CLOSED = 5; // !!State after this has not been used yet.
-DELETING = 6;
-DELETED = 7; // object is deleted.
+OPEN = 1;
+CLOSING = 2;
+CLOSED = 3;
+DELETING = 4;
+DELETED = 5; // object is deleted.
 }
 
 enum LifeCycleEvent {
-CREATE = 1; // A request to client to create this object
-CREATED = 2;
-FINALIZE = 3;
-CLOSE = 4; // !!Event after this has not been used yet.
-UPDATE = 5;
-TIMEOUT = 6; // creation has timed out from SCM's View.
-DELETE = 7;
-CLEANUP = 8;
+FINALIZE = 1;
+CLOSE = 2; // !!Event after this has not been used yet.
+DELETE = 3;
+CLEANUP = 4;
 }
 
 message ContainerInfoProto {
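
The enum change above shrinks the container lifecycle to five states and four
events. A minimal, standalone sketch of the resulting state machine; the
transition table is an inference from the event names, not something this
commit spells out:

public class ContainerLifecycleDemo {

  enum LifeCycleState { OPEN, CLOSING, CLOSED, DELETING, DELETED }
  enum LifeCycleEvent { FINALIZE, CLOSE, DELETE, CLEANUP }

  // Assumed transitions: OPEN -FINALIZE-> CLOSING -CLOSE-> CLOSED,
  // then CLOSED -DELETE-> DELETING -CLEANUP-> DELETED.
  static LifeCycleState next(LifeCycleState s, LifeCycleEvent e) {
    switch (e) {
      case FINALIZE:
        if (s == LifeCycleState.OPEN) return LifeCycleState.CLOSING;
        break;
      case CLOSE:
        if (s == LifeCycleState.CLOSING) return LifeCycleState.CLOSED;
        break;
      case DELETE:
        if (s == LifeCycleState.CLOSED) return LifeCycleState.DELETING;
        break;
      case CLEANUP:
        if (s == LifeCycleState.DELETING) return LifeCycleState.DELETED;
        break;
    }
    throw new IllegalStateException("Invalid transition: " + s + " on " + e);
  }

  public static void main(String[] args) {
    LifeCycleState s = LifeCycleState.OPEN;
    s = next(s, LifeCycleEvent.FINALIZE); // CLOSING
    s = next(s, LifeCycleEvent.CLOSE);    // CLOSED
    System.out.println(s);
  }
}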


[44/49] hadoop git commit: HDFS-14024. RBF: ProvidedCapacityTotal json exception in NamenodeHeartbeatService. Contributed by CR Hota.

2018-11-12 Thread brahma
HDFS-14024. RBF: ProvidedCapacityTotal json exception in 
NamenodeHeartbeatService. Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/832b2201
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/832b2201
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/832b2201

Branch: refs/heads/HDFS-13891
Commit: 832b2201634bd482e03bf14ce45cc8904d9ca7ce
Parents: 0dd9a27
Author: Inigo Goiri 
Authored: Thu Nov 1 11:49:33 2018 -0700
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:18:57 2018 +0530

--
 .../hdfs/server/federation/router/NamenodeHeartbeatService.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/832b2201/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index a1adf77..1349aa3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -351,7 +351,7 @@ public class NamenodeHeartbeatService extends 
PeriodicService {
 jsonObject.getLong("PendingReplicationBlocks"),
 jsonObject.getLong("UnderReplicatedBlocks"),
 jsonObject.getLong("PendingDeletionBlocks"),
-jsonObject.getLong("ProvidedCapacityTotal"));
+jsonObject.optLong("ProvidedCapacityTotal"));
   }
 }
   }
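
The one-character-class fix above matters because getLong() throws a
JSONException when the JMX payload has no "ProvidedCapacityTotal" key (for
example, a NameNode built without PROVIDED storage support), while optLong()
falls back to 0. A minimal sketch of the difference, using org.json's
JSONObject as a stand-in for the JSON type the router code uses:

import org.json.JSONObject;

public class OptLongDemo {
  public static void main(String[] args) {
    JSONObject jmx = new JSONObject("{\"PendingDeletionBlocks\": 7}");

    // optLong() on a missing key returns its default (0) instead of throwing.
    System.out.println(jmx.optLong("ProvidedCapacityTotal")); // 0

    // getLong() on the same missing key throws org.json.JSONException.
    try {
      jmx.getLong("ProvidedCapacityTotal");
    } catch (org.json.JSONException e) {
      System.out.println("getLong failed: " + e.getMessage());
    }
  }
}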





[06/49] hadoop git commit: MAPREDUCE-7148. Fast fail jobs when exceeds dfs quota limitation. Contributed by Wang Yan

2018-11-12 Thread brahma
MAPREDUCE-7148. Fast fail jobs when exceeds dfs quota limitation. Contributed 
by Wang Yan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b6625a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b6625a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b6625a9

Branch: refs/heads/HDFS-13891
Commit: 0b6625a9735f76ab473b41d8ab9b7f3c7678cfff
Parents: 8dc1f6d
Author: Jason Lowe 
Authored: Wed Nov 7 08:20:49 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 7 08:20:49 2018 -0600

--
 ...ClusterStorageCapacityExceededException.java |  51 
 .../hdfs/protocol/QuotaExceededException.java   |   5 +-
 .../org/apache/hadoop/mapred/YarnChild.java |  28 -
 .../org/apache/hadoop/mapred/TestYarnChild.java | 118 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   3 +
 .../src/main/resources/mapred-default.xml   |   9 ++
 6 files changed, 209 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6625a9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
new file mode 100644
index 0000000..bbbf073
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Exception raised by HDFS indicating that storage capacity in the
+ * cluster filesystem is exceeded. See also
+ * https://issues.apache.org/jira/browse/MAPREDUCE-7148.
+ */
+@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+@InterfaceStability.Evolving
+public class ClusterStorageCapacityExceededException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public ClusterStorageCapacityExceededException() {
+super();
+  }
+
+  public ClusterStorageCapacityExceededException(String message) {
+super(message);
+  }
+
+  public ClusterStorageCapacityExceededException(String message,
+  Throwable cause) {
+super(message, cause);
+  }
+
+  public ClusterStorageCapacityExceededException(Throwable cause) {
+super(cause);
+  }
+}
\ No newline at end of file
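
With the new exception type in place, a task runtime can fail fast instead of
retrying a doomed write. A hedged sketch of the cause-chain check; the helper
class here is ours for illustration, while the real wiring lives in YarnChild
behind the new mapred-default property:

import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;

public final class FastFailCheck {

  private FastFailCheck() {
  }

  /**
   * Walks a task failure's cause chain and reports whether any link signals
   * that cluster storage capacity (for example an HDFS space quota) was hit.
   */
  public static boolean isCapacityExceeded(Throwable t) {
    while (t != null) {
      if (t instanceof ClusterStorageCapacityExceededException) {
        return true;
      }
      t = t.getCause();
    }
    return false;
  }
}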

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6625a9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
index f4e7f34..7033f3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
@@ -18,10 +18,9 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;
 
 /**
  * This exception is thrown when modification to HDFS results in violation
@@ -37,7 +36,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class QuotaExceededException extends 

[01/49] hadoop git commit: YARN-8233. NPE in CapacityScheduler#tryCommit when handling allocate/reserve proposal whose allocatedOrReservedContainer is null. Contributed by Tao Yang. [Forced Update!]

2018-11-12 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-13891 f3113035e -> 053662e9f (forced update)


YARN-8233. NPE in CapacityScheduler#tryCommit when handling allocate/reserve 
proposal whose allocatedOrReservedContainer is null. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/951c98f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/951c98f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/951c98f8

Branch: refs/heads/HDFS-13891
Commit: 951c98f89059d64fda8456366f680eff4a7a6785
Parents: ba1f9d6
Author: Akira Ajisaka 
Authored: Wed Nov 7 11:17:35 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed Nov 7 11:17:35 2018 +0900

--
 .../scheduler/capacity/CapacityScheduler.java   | 86 +---
 .../TestCapacitySchedulerAsyncScheduling.java   | 83 +++
 2 files changed, 141 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/951c98f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 5d7f1ba..e604b81 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2771,7 +2771,11 @@ public class CapacityScheduler extends
 .getContainersToKill().isEmpty()) {
   list = new ArrayList<>();
   for (RMContainer rmContainer : csAssignment.getContainersToKill()) {
-list.add(getSchedulerContainer(rmContainer, false));
+SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> schedulerContainer =
+getSchedulerContainer(rmContainer, false);
+if (schedulerContainer != null) {
+  list.add(schedulerContainer);
+}
   }
 }
 
@@ -2779,10 +2783,16 @@ public class CapacityScheduler extends
   if (null == list) {
 list = new ArrayList<>();
   }
-  list.add(
-  getSchedulerContainer(csAssignment.getExcessReservation(), false));
+  SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> schedulerContainer =
+  getSchedulerContainer(csAssignment.getExcessReservation(), false);
+  if (schedulerContainer != null) {
+list.add(schedulerContainer);
+  }
 }
 
+if (list != null && list.isEmpty()) {
+  list = null;
+}
 return list;
   }
 
@@ -2867,11 +2877,15 @@ public class CapacityScheduler extends
   ((RMContainerImpl)rmContainer).setAllocationTags(
   new HashSet<>(schedulingRequest.getAllocationTags()));
 
-  allocated = new ContainerAllocationProposal<>(
-  getSchedulerContainer(rmContainer, true),
-  null, null, NodeType.NODE_LOCAL, NodeType.NODE_LOCAL,
-  SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY,
-  resource);
+  SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>
+  schedulerContainer = getSchedulerContainer(rmContainer, true);
+  if (schedulerContainer == null) {
+allocated = null;
+  } else {
+allocated = new ContainerAllocationProposal<>(schedulerContainer,
+null, null, NodeType.NODE_LOCAL, NodeType.NODE_LOCAL,
+SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, resource);
+  }
 }
 
 if (null != allocated) {
@@ -2901,16 +2915,27 @@ public class CapacityScheduler extends
   csAssignment.getAssignmentInformation().getAllocationDetails();
   if (!allocations.isEmpty()) {
 RMContainer rmContainer = allocations.get(0).rmContainer;
-allocated = new ContainerAllocationProposal<>(
-getSchedulerContainer(rmContainer, true),
-getSchedulerContainersToRelease(csAssignment),
-getSchedulerContainer(csAssignment.getFulfilledReservedContainer(),
-false), csAssignment.getType(),
-csAssignment.getRequestLocalityType(),
-csAssignment.getSchedulingMode() != null ?
-csAssignment.getSchedulingMode() :
-SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY,
-csAssignment.getResource());
+SchedulerContainer
+  
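
The NPE fix applies one pattern throughout tryCommit's helpers:
getSchedulerContainer() may now return null, so every proposal list skips
nulls, and a list that ends up empty is normalized back to null for the
caller. A generic, self-contained restatement of that pattern (the names are
ours, not the scheduler's):

import java.util.ArrayList;
import java.util.List;

public class NullSafeCollect {

  /** Collects the non-null elements; returns null when nothing survives. */
  static <T> List<T> collectNonNull(List<T> candidates) {
    List<T> out = new ArrayList<>();
    for (T c : candidates) {
      if (c != null) {
        out.add(c);
      }
    }
    return out.isEmpty() ? null : out;
  }
}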

[22/49] hadoop git commit: HDDS-733. Create container if not exist, as part of chunk write. Contributed by Lokesh Jain.

2018-11-12 Thread brahma
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index bde3bc9..7d002c3 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -616,4 +616,20 @@ public final class ContainerTestHelper {
 }
 return false;
   }
+
+  public static boolean isContainerPresent(MiniOzoneCluster cluster,
+  long containerID, DatanodeDetails datanode) {
+for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) {
+  if (datanode.equals(datanodeService.getDatanodeDetails())) {
+Container container =
+datanodeService.getDatanodeStateMachine().getContainer()
+.getContainerSet().getContainer(containerID);
+if (container != null) {
+  return true;
+}
+  }
+}
+return false;
+  }
+
 }
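
A hedged example of how a test might call the new helper; the JUnit
scaffolding and the cluster/datanode setup are assumed, not shown in this
diff:

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.junit.Assert;

class ChunkWriteAssertions {
  static void assertContainerCreated(MiniOzoneCluster cluster,
      long containerID, DatanodeDetails datanode) {
    Assert.assertTrue("container should exist after the chunk write",
        ContainerTestHelper.isContainerPresent(cluster, containerID, datanode));
  }
}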

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 733ed85..98a27bf 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -171,7 +171,6 @@ public class KeyManagerImpl implements KeyManager {
 }
 OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
 .setBlockID(new BlockID(allocatedBlock.getBlockID()))
-.setShouldCreateContainer(allocatedBlock.getCreateContainer())
 .setLength(scmBlockSize)
 .setOffset(0)
 .build();
@@ -235,7 +234,6 @@ public class KeyManagerImpl implements KeyManager {
 }
 OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder()
 .setBlockID(new BlockID(allocatedBlock.getBlockID()))
-.setShouldCreateContainer(allocatedBlock.getCreateContainer())
 .setLength(allocateSize)
 .setOffset(0)
 .build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index 2076ced..5f8e939 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -122,8 +122,7 @@ public class ScmBlockLocationTestIngClient implements 
ScmBlockLocationProtocol {
 AllocatedBlock.Builder abb =
 new AllocatedBlock.Builder()
 .setContainerBlockID(new ContainerBlockID(containerID, localID))
-.setPipeline(pipeline)
-.setShouldCreateContainer(false);
+.setPipeline(pipeline);
 return abb.build();
   }
 





[10/49] hadoop git commit: HDDS-737. Introduce Incremental Container Report. Contributed by Nanda kumar.

2018-11-12 Thread brahma
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index c1409cb..5f419d3 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -39,10 +39,10 @@ import 
org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
 import org.apache.hadoop.hdds.scm.chillmode.SCMChillModeManager;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
-import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
 import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.replication
@@ -221,7 +221,7 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 CommandStatusReportHandler cmdStatusReportHandler =
 new CommandStatusReportHandler();
 
-NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager);
+NewNodeHandler newNodeHandler = new NewNodeHandler();
 StaleNodeHandler staleNodeHandler =
 new StaleNodeHandler(scmNodeManager, pipelineManager);
 DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
@@ -231,8 +231,12 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
 
 ContainerReportHandler containerReportHandler =
-new ContainerReportHandler(containerManager, scmNodeManager,
-replicationStatus);
+new ContainerReportHandler(scmNodeManager, pipelineManager,
+containerManager, replicationStatus);
+
+IncrementalContainerReportHandler incrementalContainerReportHandler =
+new IncrementalContainerReportHandler(
+pipelineManager, containerManager);
 
 PipelineActionHandler pipelineActionHandler =
 new PipelineActionHandler(pipelineManager);
@@ -258,13 +262,6 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 replicationManager = new ReplicationManager(containerPlacementPolicy,
 containerManager, eventQueue, commandWatcherLeaseManager);
 
-// setup CloseContainer watcher
-CloseContainerWatcher closeContainerWatcher =
-new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-SCMEvents.CLOSE_CONTAINER_STATUS, commandWatcherLeaseManager,
-containerManager);
-closeContainerWatcher.start(eventQueue);
-
 scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
 .OZONE_ADMINISTRATORS);
 scmUsername = UserGroupInformation.getCurrentUser().getUserName();
@@ -282,6 +279,8 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, 
scmNodeManager);
 eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
 eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
+eventQueue.addHandler(SCMEvents.INCREMENTAL_CONTAINER_REPORT,
+incrementalContainerReportHandler);
 eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
 eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
 eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
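
The addHandler() calls above follow SCM's publish/subscribe model: each event
type is bound to one or more handlers, and firing an event dispatches the
payload to every handler registered for it. A toy, dependency-free sketch of
that idea (not the real EventQueue API beyond the addHandler shape):

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

class TinyEventQueue {
  private final Map<String, List<Consumer<Object>>> handlers = new HashMap<>();

  void addHandler(String eventType, Consumer<Object> handler) {
    handlers.computeIfAbsent(eventType, k -> new ArrayList<>()).add(handler);
  }

  void fireEvent(String eventType, Object payload) {
    List<Consumer<Object>> registered =
        handlers.getOrDefault(eventType, Collections.emptyList());
    for (Consumer<Object> h : registered) {
      h.accept(payload);
    }
  }
}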

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
index afa25e2..279acf0 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
 import java.util.ArrayList;
 import java.util.Collections;
 

[13/49] hadoop git commit: HADOOP-15846. ABFS: fix mask related bugs in setAcl, modifyAclEntries and removeAclEntries.

2018-11-12 Thread brahma
HADOOP-15846. ABFS: fix mask related bugs in setAcl, modifyAclEntries and 
removeAclEntries.

Contributed by Junhua Gu.

(cherry picked from commit 66715005f9e8f4f25faa352a06d142b75a029f0e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/724c1500
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/724c1500
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/724c1500

Branch: refs/heads/HDFS-13891
Commit: 724c15007beb9ca36f2f37c829f1daa366d95bbf
Parents: 9317a61
Author: Junhua Gu 
Authored: Thu Nov 8 17:21:40 2018 +
Committer: Steve Loughran 
Committed: Thu Nov 8 17:21:40 2018 +

--
 .../fs/azurebfs/AzureBlobFileSystemStore.java   |  20 +--
 .../fs/azurebfs/services/AbfsAclHelper.java |  89 +--
 .../azurebfs/ITestAzureBlobFilesystemAcl.java   | 147 ++-
 3 files changed, 225 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/724c1500/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 1ac1761..bfdbba8 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -626,17 +626,7 @@ public class AzureBlobFileSystemStore {
 
 final Map<String, String> aclEntries = 
AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL));
 
-for (Map.Entry<String, String> modifyAclEntry : 
modifyAclEntries.entrySet()) {
-  aclEntries.put(modifyAclEntry.getKey(), modifyAclEntry.getValue());
-}
-
-if (!modifyAclEntries.containsKey(AbfsHttpConstants.ACCESS_MASK)) {
-  aclEntries.remove(AbfsHttpConstants.ACCESS_MASK);
-}
-
-if (!modifyAclEntries.containsKey(AbfsHttpConstants.DEFAULT_MASK)) {
-  aclEntries.remove(AbfsHttpConstants.DEFAULT_MASK);
-}
+AbfsAclHelper.modifyAclEntriesInternal(aclEntries, modifyAclEntries);
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, 
true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);
@@ -736,12 +726,8 @@ public class AzureBlobFileSystemStore {
 final String eTag = 
op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG);
 
 final Map<String, String> getAclEntries = 
AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL));
-for (Map.Entry<String, String> ace : getAclEntries.entrySet()) {
-  if (ace.getKey().startsWith("default:") && (ace.getKey() != 
AbfsHttpConstants.DEFAULT_MASK)
-  && !aclEntries.containsKey(ace.getKey())) {
-aclEntries.put(ace.getKey(), ace.getValue());
-  }
-}
+
+AbfsAclHelper.setAclEntriesInternal(aclEntries, getAclEntries);
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, 
true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/724c1500/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
index c28da2c..34959a6 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
@@ -44,12 +44,17 @@ public final class AbfsAclHelper {
 // not called
   }
 
-  public static Map<String, String> deserializeAclSpec(final String 
aclSpecString) {
+  public static Map<String, String> deserializeAclSpec(final String 
aclSpecString) throws AzureBlobFileSystemException {
 final Map<String, String> aclEntries  = new HashMap<>();
-final String[] aclArray = aclSpecString.split(AbfsHttpConstants.COMMA);
-for (String acl : aclArray) {
-  int idx = acl.lastIndexOf(AbfsHttpConstants.COLON);
-  aclEntries.put(acl.substring(0, idx), acl.substring(idx + 1));
+final String[] aceArray = aclSpecString.split(AbfsHttpConstants.COMMA);
+for (String ace : aceArray) {
+  int idx = ace.lastIndexOf(AbfsHttpConstants.COLON);
+  final String key = ace.substring(0, idx);
+  final String val = ace.substring(idx + 1);
+  if (aclEntries.containsKey(key)) {
+throw new 
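
The hunk above is cut off by the digest, but its point is visible: the
rewritten deserializeAclSpec rejects duplicate ACL entries instead of silently
keeping the last one. A standalone approximation of that logic, simplified to
a plain IllegalArgumentException and literal ',' and ':' in place of the ABFS
exception hierarchy and AbfsHttpConstants:

import java.util.HashMap;
import java.util.Map;

public class AclSpecParser {

  public static Map<String, String> parse(String aclSpec) {
    final Map<String, String> entries = new HashMap<>();
    for (String ace : aclSpec.split(",")) {
      int idx = ace.lastIndexOf(':');
      final String key = ace.substring(0, idx);
      final String val = ace.substring(idx + 1);
      if (entries.containsKey(key)) {
        // The real code throws an AzureBlobFileSystemException subtype here.
        throw new IllegalArgumentException("Duplicate ACL entry: " + key);
      }
      entries.put(key, val);
    }
    return entries;
  }

  public static void main(String[] args) {
    System.out.println(parse("user::rwx,group::r-x,other::---"));
    parse("user::rwx,user::r--"); // throws on the duplicate "user:" key
  }
}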

[07/49] hadoop git commit: YARN-8977. Remove unnecessary type casting when calling AbstractYarnScheduler#getSchedulerNode. Contributed by Wanqiang Ji.

2018-11-12 Thread brahma
YARN-8977. Remove unnecessary type casting when calling 
AbstractYarnScheduler#getSchedulerNode. Contributed by Wanqiang Ji.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c96cbe86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c96cbe86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c96cbe86

Branch: refs/heads/HDFS-13891
Commit: c96cbe8659587cfc114a96aab1be5cc85029fe44
Parents: 0b6625a
Author: Weiwei Yang 
Authored: Wed Nov 7 22:27:57 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Nov 7 22:42:22 2018 +0800

--
 .../resourcemanager/scheduler/capacity/CapacityScheduler.java  | 6 +++---
 .../scheduler/fair/TestContinuousScheduling.java   | 3 +--
 .../resourcemanager/scheduler/fair/TestFairScheduler.java  | 3 +--
 3 files changed, 5 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c96cbe86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index e604b81..618ee20 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2192,8 +2192,8 @@ public class CapacityScheduler extends
 super.completedContainer(killableContainer, SchedulerUtils
 .createPreemptedContainerStatus(killableContainer.getContainerId(),
 SchedulerUtils.PREEMPTED_CONTAINER), 
RMContainerEventType.KILL);
-  } else{
-FiCaSchedulerNode node = (FiCaSchedulerNode) getSchedulerNode(
+  } else {
+FiCaSchedulerNode node = getSchedulerNode(
 killableContainer.getAllocatedNode());
 
 FiCaSchedulerApp application = getCurrentAttemptForContainer(
@@ -2225,7 +2225,7 @@ public class CapacityScheduler extends
 + nonKillableContainer.toString());
   }
 
-  FiCaSchedulerNode node = (FiCaSchedulerNode) getSchedulerNode(
+  FiCaSchedulerNode node = getSchedulerNode(
   nonKillableContainer.getAllocatedNode());
 
   FiCaSchedulerApp application = getCurrentAttemptForContainer(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c96cbe86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 2512787..e6a841a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
@@ -376,8 +376,7 @@ public class TestContinuousScheduling extends 
FairSchedulerTestBase {
 
 NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
 scheduler.handle(nodeEvent1);
-FSSchedulerNode node =
-(FSSchedulerNode) scheduler.getSchedulerNode(node1.getNodeID());
+FSSchedulerNode node = scheduler.getSchedulerNode(node1.getNodeID());
 // Tick the time and let the fsApp startTime different from initScheduler
 // time
 mockClock.tickSec(delayThresholdTimeMs / 1000);
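
The casts were redundant because AbstractYarnScheduler is generic in its node
type, so each concrete scheduler's getSchedulerNode() already returns the
right class. A minimal illustration with toy class names, not YARN's:

abstract class AbstractScheduler<N> {
  abstract N getSchedulerNode(String nodeId);
}

class FiCaNode {
}

class CapSched extends AbstractScheduler<FiCaNode> {
  @Override
  FiCaNode getSchedulerNode(String nodeId) {
    return new FiCaNode();
  }

  void demo() {
    // No (FiCaNode) cast needed: the generic bound already fixes the type.
    FiCaNode node = getSchedulerNode("n1");
  }
}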


[49/49] hadoop git commit: HDFS-13845. RBF: The default MountTableResolver should fail resolving multi-destination paths. Contributed by yanghuafeng.

2018-11-12 Thread brahma
HDFS-13845. RBF: The default MountTableResolver should fail resolving 
multi-destination paths. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0dd9a27f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0dd9a27f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0dd9a27f

Branch: refs/heads/HDFS-13891
Commit: 0dd9a27fdced779c60c16e777aabb050b2da5af2
Parents: b94d1d4
Author: Brahma Reddy Battula 
Authored: Tue Oct 30 11:21:08 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:18:57 2018 +0530

--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java| 45 
 .../router/TestDisableNameservices.java | 36 ++--
 3 files changed, 70 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9a27f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 121469f..9e69840 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -539,21 +539,28 @@ public class MountTableResolver
* @param entry Mount table entry.
* @return PathLocation containing the namespace, local path.
*/
-  private static PathLocation buildLocation(
-  final String path, final MountTable entry) {
-
+  private PathLocation buildLocation(
+  final String path, final MountTable entry) throws IOException {
 String srcPath = entry.getSourcePath();
 if (!path.startsWith(srcPath)) {
   LOG.error("Cannot build location, {} not a child of {}", path, srcPath);
   return null;
 }
+
+List<RemoteLocation> dests = entry.getDestinations();
+if (getClass() == MountTableResolver.class && dests.size() > 1) {
+  throw new IOException("Cannnot build location, "
+  + getClass().getSimpleName()
+  + " should not resolve multiple destinations for " + path);
+}
+
 String remainingPath = path.substring(srcPath.length());
 if (remainingPath.startsWith(Path.SEPARATOR)) {
   remainingPath = remainingPath.substring(1);
 }
 
 List<RemoteLocation> locations = new LinkedList<>();
-for (RemoteLocation oneDst : entry.getDestinations()) {
+for (RemoteLocation oneDst : dests) {
   String nsId = oneDst.getNameserviceId();
   String dest = oneDst.getDest();
   String newPath = dest;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9a27f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index 5e3b861..14ccb61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -79,6 +79,8 @@ public class TestMountTableResolver {
* __usr
* bin -> 2:/bin
* __readonly -> 2:/tmp
+   * __multi -> 5:/dest1
+   *6:/dest2
*
* @throws IOException If it cannot set the mount table.
*/
@@ -126,6 +128,12 @@ public class TestMountTableResolver {
 MountTable readOnlyEntry = MountTable.newInstance("/readonly", map);
 readOnlyEntry.setReadOnly(true);
 mountTable.addEntry(readOnlyEntry);
+
+// /multi
+map = getMountTableEntry("5", "/dest1");
+map.put("6", "/dest2");
+MountTable multiEntry = MountTable.newInstance("/multi", map);
+mountTable.addEntry(multiEntry);
   }
 
   @Before
@@ -201,6 +209,17 @@ public class TestMountTableResolver {
 }
   }
 
+  @Test
+  public void testMuiltipleDestinations() throws IOException {
+try {
+  mountTable.getDestinationForPath("/multi");
+  fail("The getDestinationForPath call should fail.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
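
Note the getClass() == MountTableResolver.class comparison in buildLocation:
only the base resolver rejects multi-destination entries, so subclasses that
know how to order destinations keep resolving them. A tiny demo of that
exact-type guard:

class Base {
  boolean rejectsMulti() {
    // instanceof would also match subclasses; == matches the base class only.
    return getClass() == Base.class;
  }
}

class Derived extends Base {
}

public class GuardDemo {
  public static void main(String[] args) {
    System.out.println(new Base().rejectsMulti());    // true
    System.out.println(new Derived().rejectsMulti()); // false
  }
}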

[24/49] hadoop git commit: HDDS-826. Update Ratis to 0.3.0-6f3419a-SNAPSHOT.

2018-11-12 Thread brahma
HDDS-826. Update Ratis to 0.3.0-6f3419a-SNAPSHOT.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/298d2502
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/298d2502
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/298d2502

Branch: refs/heads/HDFS-13891
Commit: 298d2502b0255270c829225373a456a5560aac73
Parents: 9fe50b4
Author: Tsz Wo Nicholas Sze 
Authored: Fri Nov 9 18:35:40 2018 -0800
Committer: Tsz Wo Nicholas Sze 
Committed: Fri Nov 9 18:35:40 2018 -0800

--
 hadoop-hdds/pom.xml  | 2 +-
 hadoop-ozone/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d2502/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 7a1704c..a6b0d84 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -45,7 +45,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 
 
-<ratis.version>0.3.0-1d07b18-SNAPSHOT</ratis.version>
+<ratis.version>0.3.0-6f3419a-SNAPSHOT</ratis.version>
 
 <bouncycastle.version>1.60</bouncycastle.version>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d2502/hadoop-ozone/pom.xml
--
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 671421e..5bd64a8 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -33,7 +33,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 <hadoop.version>3.2.1-SNAPSHOT</hadoop.version>
 <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 <ozone.version>0.4.0-SNAPSHOT</ozone.version>
-<ratis.version>0.3.0-1d07b18-SNAPSHOT</ratis.version>
+<ratis.version>0.3.0-6f3419a-SNAPSHOT</ratis.version>
 <bouncycastle.version>1.60</bouncycastle.version>
 <ozone.release>Badlands</ozone.release>
 <declared.ozone.version>${ozone.version}</declared.ozone.version>





[02/49] hadoop git commit: YARN-8976. Remove redundant modifiers in interface ApplicationConstants. Contributed by Zhankun Tang.

2018-11-12 Thread brahma
YARN-8976. Remove redundant modifiers in interface ApplicationConstants. 
Contributed by Zhankun Tang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/482716e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/482716e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/482716e5

Branch: refs/heads/HDFS-13891
Commit: 482716e5a4d1edfd3aa6a1ae65a58652f89375f1
Parents: 951c98f
Author: Weiwei Yang 
Authored: Wed Nov 7 10:48:07 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Nov 7 10:48:07 2018 +0800

--
 .../hadoop/yarn/api/ApplicationConstants.java   | 22 ++--
 1 file changed, 11 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/482716e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index eb03fb2..f5d8f02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -39,12 +39,12 @@ public interface ApplicationConstants {
* The environment variable for APP_SUBMIT_TIME. Set in AppMaster environment
* only
*/
-  public static final String APP_SUBMIT_TIME_ENV = "APP_SUBMIT_TIME_ENV";
+  String APP_SUBMIT_TIME_ENV = "APP_SUBMIT_TIME_ENV";
 
   /**
* The cache file into which container token is written
*/
-  public static final String CONTAINER_TOKEN_FILE_ENV_NAME =
+  String CONTAINER_TOKEN_FILE_ENV_NAME =
   UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
 
   /**
@@ -74,14 +74,14 @@ public interface ApplicationConstants {
* ApplicationMaster's environment only. This states that for all 
non-relative
* web URLs in the app masters web UI what base should they have.
*/
-  public static final String APPLICATION_WEB_PROXY_BASE_ENV =
+  String APPLICATION_WEB_PROXY_BASE_ENV =
 "APPLICATION_WEB_PROXY_BASE";
 
   /**
* The temporary environmental variable for container log directory. This
* should be replaced by real container log directory on container launch.
*/
-  public static final String LOG_DIR_EXPANSION_VAR = "<LOG_DIR>";
+  String LOG_DIR_EXPANSION_VAR = "<LOG_DIR>";
 
   /**
* This constant is used to construct class path and it will be replaced with
@@ -92,7 +92,7 @@ public interface ApplicationConstants {
*/
   @Public
   @Unstable
-  public static final String CLASS_PATH_SEPARATOR= "<CPS>";
+  String CLASS_PATH_SEPARATOR= "<CPS>";
 
   /**
* The following two constants are used to expand parameter and it will be
@@ -105,7 +105,7 @@ public interface ApplicationConstants {
*/
   @Public
   @Unstable
-  public static final String PARAMETER_EXPANSION_LEFT="{{";
+  String PARAMETER_EXPANSION_LEFT="{{";
 
   /**
* User has to use this constant to construct class path if user wants
@@ -114,11 +114,11 @@ public interface ApplicationConstants {
*/
   @Public
   @Unstable
-  public static final String PARAMETER_EXPANSION_RIGHT="}}";
+  String PARAMETER_EXPANSION_RIGHT="}}";
 
-  public static final String STDERR = "stderr";
+  String STDERR = "stderr";
 
-  public static final String STDOUT = "stdout";
+  String STDOUT = "stdout";
 
   /**
* The type of launch for the container.
@@ -136,7 +136,7 @@ public interface ApplicationConstants {
* Some of the environment variables for applications are final
* i.e. they cannot be modified by the applications.
*/
-  public enum Environment {
+  enum Environment {
 /**
  * $USER
  * Final, non-modifiable.
@@ -283,7 +283,7 @@ public interface ApplicationConstants {
 "YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE");
 
 private final String variable;
-private Environment(String variable) {
+Environment(String variable) {
   this.variable = variable;
 }
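
The removed modifiers are redundant because the Java Language Specification
makes every interface field implicitly public, static and final (and nested
enums implicitly static). Both declarations below compile to the same thing:

interface WithModifiers {
  public static final String STDOUT = "stdout";
}

interface WithoutModifiers {
  String STDOUT = "stdout"; // identical semantics, less noise
}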
 





[31/49] hadoop git commit: HADOOP-15110. Gauges are getting logged in exceptions from AutoRenewalThreadForUserCreds.

2018-11-12 Thread brahma
HADOOP-15110. Gauges are getting logged in exceptions from 
AutoRenewalThreadForUserCreds.

Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c9d97b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c9d97b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c9d97b8

Branch: refs/heads/HDFS-13891
Commit: 3c9d97b8f7d6eb75f08fc6d37cee37c22760bb86
Parents: c741109
Author: Steve Loughran 
Authored: Mon Nov 12 12:58:05 2018 +
Committer: Steve Loughran 
Committed: Mon Nov 12 12:58:05 2018 +

--
 .../org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java | 7 +++
 .../java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java  | 7 +++
 .../java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java | 6 ++
 3 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9d97b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
index b16eda2..6a52bf3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
@@ -77,4 +77,11 @@ public class MutableGaugeFloat extends MutableGauge {
   }
 }
   }
+
+  /**
+   * @return  the value of the metric
+   */
+  public String toString() {
+return value.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9d97b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
index cce4528..8983900 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
@@ -87,4 +87,11 @@ public class MutableGaugeInt extends MutableGauge {
   clearChanged();
 }
   }
+
+  /**
+   * @return  the value of the metric
+   */
+  public String toString() {
+return value.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9d97b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
index a2a8632..a452518 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
@@ -88,4 +88,10 @@ public class MutableGaugeLong extends MutableGauge {
 }
   }
 
+  /**
+   * @return  the value of the metric
+   */
+  public String toString() {
+return value.toString();
+  }
 }
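
The root cause: string concatenation in a log or exception message calls
toString(), and without an override a gauge prints in the default Object form
(class name plus hash code) rather than as its value. A self-contained sketch
of the behavior the fix produces, using a toy gauge rather than the metrics2
class:

import java.util.concurrent.atomic.AtomicInteger;

class ToyGauge {
  private final AtomicInteger value = new AtomicInteger(42);

  @Override
  public String toString() {
    return value.toString(); // mirrors the fix: print the metric's value
  }
}

public class GaugeToStringDemo {
  public static void main(String[] args) {
    // Without the override this would print something like "ToyGauge@6d06d69c".
    System.out.println("credential renewal failed, gauge=" + new ToyGauge());
  }
}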





[12/49] hadoop git commit: HDDS-802. Container State Manager should get open pipelines for allocating container. Contributed by Lokesh Jain.

2018-11-12 Thread brahma
HDDS-802. Container State Manager should get open pipelines for allocating 
container. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9317a61f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9317a61f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9317a61f

Branch: refs/heads/HDFS-13891
Commit: 9317a61f3cdc5ca91c6934eec9898cee3d65441a
Parents: c80f753
Author: Yiqun Lin 
Authored: Thu Nov 8 23:41:43 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Nov 8 23:41:43 2018 +0800

--
 .../scm/container/ContainerStateManager.java|  4 +-
 .../hdds/scm/pipeline/PipelineManager.java  |  3 +
 .../hdds/scm/pipeline/PipelineStateManager.java |  5 ++
 .../hdds/scm/pipeline/PipelineStateMap.java | 22 +++
 .../hdds/scm/pipeline/SCMPipelineManager.java   | 11 
 .../scm/pipeline/TestPipelineStateManager.java  | 61 ++--
 6 files changed, 100 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 87505c3..74c8dcb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -248,8 +248,8 @@ public class ContainerStateManager {
 try {
   pipeline = pipelineManager.createPipeline(type, replicationFactor);
 } catch (IOException e) {
-  final List<Pipeline> pipelines =
-  pipelineManager.getPipelines(type, replicationFactor);
+  final List<Pipeline> pipelines = pipelineManager
+  .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN);
   if (pipelines.isEmpty()) {
 throw new IOException("Could not allocate container");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index 04ec535..cce09f3 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -46,6 +46,9 @@ public interface PipelineManager extends Closeable {
   List<Pipeline> getPipelines(ReplicationType type,
   ReplicationFactor factor);
 
+  List<Pipeline> getPipelines(ReplicationType type,
+  ReplicationFactor factor, Pipeline.PipelineState state);
+
   void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)
   throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 67f74d3..9f95378 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -64,6 +64,11 @@ class PipelineStateManager {
 return pipelineStateMap.getPipelines(type, factor);
   }
 
+  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
+  PipelineState state) {
+return pipelineStateMap.getPipelines(type, factor, state);
+  }
+
   List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
 return pipelineStateMap.getPipelines(type, states);
   }
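
A hedged sketch of the kind of call site the new overload enables; the
signature comes from the hunks above, while the initialized PipelineManager
and the RATIS/THREE arguments are assumptions for illustration:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

class OpenPipelineLookup {
  static Pipeline pickOpenPipeline(PipelineManager pipelineManager)
      throws IOException {
    List<Pipeline> open = pipelineManager.getPipelines(
        ReplicationType.RATIS, ReplicationFactor.THREE,
        Pipeline.PipelineState.OPEN);
    if (open.isEmpty()) {
      // Mirrors the fallback failure in ContainerStateManager above.
      throw new IOException("Could not allocate container");
    }
    return open.get(0);
  }
}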

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
index 7b69491..85790b2 100644
--- 

[17/49] hadoop git commit: YARN-8990. Fix fair scheduler race condition in app submit and queue cleanup. (Contributed by Wilfred Spiegelenburg)

2018-11-12 Thread brahma
YARN-8990. Fix fair scheduler race condition in app submit and queue cleanup. 
(Contributed by Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524a7523
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524a7523
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524a7523

Branch: refs/heads/HDFS-13891
Commit: 524a7523c427b55273133078898ae3535897bada
Parents: 89b4916
Author: Haibo Chen 
Authored: Thu Nov 8 16:02:48 2018 -0800
Committer: Haibo Chen 
Committed: Thu Nov 8 16:02:48 2018 -0800

--
 .../scheduler/fair/FSLeafQueue.java |  14 +++
 .../scheduler/fair/FairScheduler.java   |  19 +++-
 .../scheduler/fair/QueueManager.java| 113 +--
 3 files changed, 104 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/524a7523/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 7e4dab8..a038887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -651,4 +651,18 @@ public class FSLeafQueue extends FSQueue {
   writeLock.unlock();
 }
   }
+
+  /**
+   * This method is called when an application is removed from this queue
+   * during the submit process.
+   * @param applicationId the application's id
+   */
+  public void removeAssignedApp(ApplicationId applicationId) {
+writeLock.lock();
+try {
+  assignedApps.remove(applicationId);
+} finally {
+  writeLock.unlock();
+}
+  }
 }
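
The race being closed: the QueueManager cleanup can remove a dynamic queue
that looks empty while a submission has been assigned to it but has not yet
registered its application. Judging from the FairScheduler hunk below, the
application is now recorded in the queue's assignedApps when the queue is
picked, and backed out on every rejection path. A simplified sketch of the
pattern (the rejection condition and message are elided):

    FSLeafQueue queue = assignToQueue(rmApp, queueName, user, applicationId);
    // applicationId is now marked as assigned inside assignToQueue(), so
    // the cleanup thread cannot reap the queue while this submission is
    // still in flight
    if (rejected) {
      rejectApplicationWithMessage(applicationId, msg);
      queue.removeAssignedApp(applicationId);  // undo the reservation
      return;
    }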

http://git-wip-us.apache.org/repos/asf/hadoop/blob/524a7523/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index da5e4c9..e5d2a06 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -473,7 +473,7 @@ public class FairScheduler extends
 writeLock.lock();
 try {
   RMApp rmApp = rmContext.getRMApps().get(applicationId);
-  FSLeafQueue queue = assignToQueue(rmApp, queueName, user);
+  FSLeafQueue queue = assignToQueue(rmApp, queueName, user, applicationId);
   if (queue == null) {
 return;
   }
@@ -499,6 +499,7 @@ public class FairScheduler extends
   applicationId, queue.getName(),
   invalidAMResourceRequests, queue.getMaxShare());
   rejectApplicationWithMessage(applicationId, msg);
+  queue.removeAssignedApp(applicationId);
   return;
 }
   }
@@ -513,6 +514,7 @@ public class FairScheduler extends
 + " cannot submit applications to queue " + queue.getName()
 + "(requested queuename is " + queueName + ")";
 rejectApplicationWithMessage(applicationId, msg);
+queue.removeAssignedApp(applicationId);
 return;
   }
 
@@ -520,7 +522,6 @@ public class FairScheduler extends
  new SchedulerApplication<FSAppAttempt>(queue, user);
   applications.put(applicationId, application);
   queue.getMetrics().submitApp(user);
-  queue.addAssignedApp(applicationId);
 
   LOG.info("Accepted application " + applicationId + " from user: " + user
   + ", in 

[19/49] hadoop git commit: HDDS-823. OzoneRestClient is failing with NPE on getKeyDetails call. Contributed by Bharat Viswanadham.

2018-11-12 Thread brahma
HDDS-823. OzoneRestClient is failing with NPE on getKeyDetails call. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47194fef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47194fef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47194fef

Branch: refs/heads/HDFS-13891
Commit: 47194fefdd7a225399342af1960cbd219a9b4763
Parents: 66bf624
Author: Bharat Viswanadham 
Authored: Thu Nov 8 21:28:04 2018 -0800
Committer: Bharat Viswanadham 
Committed: Thu Nov 8 21:28:04 2018 -0800

--
 .../hadoop/ozone/web/storage/DistributedStorageHandler.java | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47194fef/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
--
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index 88f2d6e..a8df114 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.web.storage;
 
 import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -486,6 +487,7 @@ public final class DistributedStorageHandler implements 
StorageHandler {
 HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime()));
 keyInfo.setModifiedOn(
 HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime()));
+keyInfo.setType(ReplicationType.valueOf(omKeyInfo.getType().toString()));
 return keyInfo;
   }
 
@@ -510,6 +512,8 @@ public final class DistributedStorageHandler implements 
StorageHandler {
 keyInfoDetails.setModifiedOn(
 HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime()));
 keyInfoDetails.setKeyLocations(keyLocations);
+keyInfoDetails.setType(ReplicationType.valueOf(omKeyInfo.getType()
+.toString()));
 return keyInfoDetails;
   }
 
@@ -553,6 +557,7 @@ public final class DistributedStorageHandler implements 
StorageHandler {
 HddsClientUtils.formatDateTime(info.getCreationTime()));
 tempInfo.setModifiedOn(
 HddsClientUtils.formatDateTime(info.getModificationTime()));
+tempInfo.setType(ReplicationType.valueOf(info.getType().toString()));
 
 result.addKey(tempInfo);
   }
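
The REST-side key info objects apparently never had their replication type
populated, which is what the getKeyDetails NPE traces back to; the fix copies
the OM-side enum into the REST-side enum by name. The same bridging pattern
in isolation, with stand-in enums:

    enum OmType { RATIS, STAND_ALONE }
    enum RestType { RATIS, STAND_ALONE }

    OmType omType = OmType.RATIS;
    // valueOf() matches on the constant name and throws
    // IllegalArgumentException if the target enum lacks it
    RestType restType = RestType.valueOf(omType.toString());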


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/49] hadoop git commit: HDFS-12284. addendum to HDFS-12284. Contributed by Inigo Goiri.

2018-11-12 Thread brahma
HDFS-12284. addendum to HDFS-12284. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/053662e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/053662e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/053662e9

Branch: refs/heads/HDFS-13891
Commit: 053662e9fa95375716f699b5889a14118066da45
Parents: a47397c
Author: Brahma Reddy Battula 
Authored: Wed Nov 7 07:37:02 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:18:57 2018 +0530

--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/053662e9/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index 9f515bc..947c91a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -36,7 +36,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <dependencies>
     <dependency>
       <groupId>org.bouncycastle</groupId>
-      <artifactId>bcprov-jdk16</artifactId>
+      <artifactId>bcprov-jdk15on</artifactId>
       <scope>test</scope>
     </dependency>
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/49] hadoop git commit: HDDS-709. Modify Close Container handling sequence on datanodes. Contributed by Shashikant Banerjee.

2018-11-12 Thread brahma
HDDS-709. Modify Close Container handling sequence on datanodes. Contributed by 
Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f944f338
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f944f338
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f944f338

Branch: refs/heads/HDFS-13891
Commit: f944f3383246450a1aa2b34f55f99a9e86e10c42
Parents: 1f9c4f3
Author: Jitendra Pandey 
Authored: Mon Nov 12 14:08:39 2018 -0800
Committer: Jitendra Pandey 
Committed: Mon Nov 12 14:08:39 2018 -0800

--
 .../helpers/ContainerNotOpenException.java  |  36 +++
 .../helpers/InvalidContainerStateException.java |  35 ++
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../container/common/impl/HddsDispatcher.java   | 106 ---
 .../common/interfaces/ContainerDispatcher.java  |  10 ++
 .../CloseContainerCommandHandler.java   |  28 +++--
 .../server/ratis/ContainerStateMachine.java |  11 ++
 .../container/keyvalue/KeyValueHandler.java |  33 +++---
 .../ozone/client/io/ChunkGroupOutputStream.java |  14 ++-
 .../rpc/TestCloseContainerHandlingByClient.java |   2 +-
 .../rpc/TestContainerStateMachineFailures.java  |   6 +-
 .../transport/server/ratis/TestCSMMetrics.java  |   6 ++
 .../container/server/TestContainerServer.java   |   6 ++
 13 files changed, 255 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f944f338/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
new file mode 100644
index 000..4e406e6
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+
+/**
+ * Exception thrown when a write/update operation is done on a non-open
+ * container.
+ */
+public class ContainerNotOpenException extends StorageContainerException {
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   */
+  public ContainerNotOpenException(String message) {
+super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN);
+  }
+}
\ No newline at end of file
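
A sketch of the guard this exception enables on the datanode write path
(simplified, with approximate names; the real checks land in HddsDispatcher
and KeyValueHandler in this patch):

    if (isWriteRequest(request) && !container.getContainerData().isOpen()) {
      throw new ContainerNotOpenException(
          "Container " + containerId + " is not in OPEN state");
    }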

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f944f338/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
new file mode 100644
index 000..1378d1a
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ 

[38/49] hadoop git commit: HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume Failures. Contributed by Ayush Saxena.

2018-11-12 Thread brahma
HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume 
Failures. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6d4e19f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6d4e19f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6d4e19f

Branch: refs/heads/HDFS-13891
Commit: b6d4e19f34f474ea8068ebb374f55e0db2f714da
Parents: e269c3f
Author: Arpit Agarwal 
Authored: Mon Nov 12 15:31:42 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 12 15:31:42 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6d4e19f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index f993ae7f..1caa4e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -429,7 +429,7 @@ No nodes are decommissioning
         <td>{#helper_date_tostring value="{lastVolumeFailureDate}"/}</td>
         <td>{volfails}</td>
         <td>{estimatedCapacityLostTotal|fmt_bytes}</td>
-        <td>{#failedStorageLocations}{.}{@sep}{/sep}{/failedStorageLocations}</td>
+        <td>{#failedStorageIDs}{.}{@sep}{/sep}{/failedStorageIDs}</td>
   
   {/LiveNodes}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/49] hadoop git commit: YARN-8877. [CSI] Extend service spec to allow setting resource attributes. Contributed by Weiwei Yang.

2018-11-12 Thread brahma
YARN-8877. [CSI] Extend service spec to allow setting resource attributes. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42f3a708
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42f3a708
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42f3a708

Branch: refs/heads/HDFS-13891
Commit: 42f3a7082a90bc71f0e86dc1e50b0c77b05489cf
Parents: 3c9d97b
Author: Sunil G 
Authored: Mon Nov 12 21:09:30 2018 +0530
Committer: Sunil G 
Committed: Mon Nov 12 21:09:30 2018 +0530

--
 .../yarn/api/records/ResourceInformation.java   |  7 +++
 .../api/records/ResourceInformation.java| 18 
 .../yarn/service/component/Component.java   |  3 +-
 .../hadoop/yarn/service/TestServiceAM.java  | 47 
 .../yarn/service/conf/ExampleAppJson.java   |  1 +
 .../yarn/service/conf/TestAppJsonResolve.java   | 18 
 .../yarn/service/conf/examples/external3.json   | 26 +++
 .../markdown/yarn-service/YarnServiceAPI.md |  2 +-
 8 files changed, 120 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42f3a708/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 057e94e..047c09a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -276,6 +276,13 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
   }
 
   public static ResourceInformation newInstance(String name, String units,
+  long value, Map<String, String> attributes) {
+return ResourceInformation
+.newInstance(name, units, value, ResourceTypes.COUNTABLE, 0L,
+Long.MAX_VALUE, null, attributes);
+  }
+
+  public static ResourceInformation newInstance(String name, String units,
   ResourceTypes resourceType) {
 return ResourceInformation.newInstance(name, units, 0L, resourceType, 0L,
 Long.MAX_VALUE);
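
A sketch of the new overload in use; the resource name and attribute keys
below are made-up placeholders, not constants defined by YARN:

    Map<String, String> attrs = ImmutableMap.of(
        "driver", "example-csi-driver",
        "mountPoint", "/mnt/data");
    ResourceInformation volume = ResourceInformation.newInstance(
        "yarn.io/example-volume", "Mi", 1024L, attrs);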

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42f3a708/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
index 103fffb..e466ce7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.yarn.service.api.records;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableMap;
 import com.google.gson.annotations.SerializedName;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 
+import java.util.Map;
 import java.util.Objects;
 
 /**
@@ -35,11 +37,25 @@ public class ResourceInformation {
   @SerializedName("unit")
   private String unit = null;
 
+  @SerializedName("attributes")
+  private Map<String, String> attributes = null;
+
   public ResourceInformation value(Long value) {
 this.value = value;
 return this;
   }
 
+  @ApiModelProperty(value = "")
+  @JsonProperty("attributes")
+  public Map<String, String> getAttributes() {
+return attributes == null ? ImmutableMap.of() : attributes;
+  }
+
+  public ResourceInformation attributes(Map<String, String> attributes) {
+this.attributes = attributes;
+return this;
+  }
+
   /**
* Integer value of the resource.
*
@@ -98,6 +114,8 @@ public class ResourceInformation {
 sb.append("class ResourceInformation {\n");
 sb.append("value: ").append(toIndentedString(value)).append("\n");
 sb.append("unit: 

[29/49] hadoop git commit: HDDS-767. OM should not search for STDOUT root logger for audit logging. Contributed by Dinesh Chitlangia.

2018-11-12 Thread brahma
HDDS-767. OM should not search for STDOUT root logger for audit logging. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c32b50d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c32b50d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c32b50d

Branch: refs/heads/HDFS-13891
Commit: 9c32b50d610463bb50a25bb01606ceeea8e04507
Parents: 4e72844
Author: Márton Elek 
Authored: Mon Nov 12 10:54:41 2018 +0100
Committer: Márton Elek 
Committed: Mon Nov 12 10:54:41 2018 +0100

--
 hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c32b50d/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
--
diff --git a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties 
b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
index 7be51ac..57577e1 100644
--- a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
+++ b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
@@ -86,5 +86,5 @@ logger.audit.appenderRefs=rolling
 logger.audit.appenderRef.file.ref=RollingFile
 
 rootLogger.level=INFO
-rootLogger.appenderRefs=stdout
-rootLogger.appenderRef.stdout.ref=STDOUT
+#rootLogger.appenderRefs=stdout
+#rootLogger.appenderRef.stdout.ref=STDOUT


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/49] hadoop git commit: HDDS-812. TestEndPoint#testCheckVersionResponse is failing. Contributed by Hanisha Koneru.

2018-11-12 Thread brahma
HDDS-812. TestEndPoint#testCheckVersionResponse is failing. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66bf6240
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66bf6240
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66bf6240

Branch: refs/heads/HDFS-13891
Commit: 66bf62404a038193fe5849c84edaa062d8e05f44
Parents: 524a752
Author: Bharat Viswanadham 
Authored: Thu Nov 8 17:52:09 2018 -0800
Committer: Bharat Viswanadham 
Committed: Thu Nov 8 17:52:09 2018 -0800

--
 .../java/org/apache/hadoop/ozone/container/common/TestEndPoint.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66bf6240/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index a089621..0f35607 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -196,6 +196,7 @@ public class TestEndPoint {
   // different from SCM server response scmId
   String newScmId = UUID.randomUUID().toString();
   scmServerImpl.setScmId(newScmId);
+  rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
   newState = versionTask.call();
   Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
 newState);
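
The one-line fix rewinds the endpoint state machine between the two handshake
attempts: the earlier successful call had already advanced the endpoint past
GETVERSION, so the second versionTask.call() presumably never re-entered the
version-check path. In sketch form:

    // rewind, then re-run the handshake against the changed scmId
    rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
    newState = versionTask.call();
    Assert.assertEquals(
        EndpointStateMachine.EndPointStates.SHUTDOWN, newState);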


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/49] hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread brahma
HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/703b2860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/703b2860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/703b2860

Branch: refs/heads/HDFS-13891
Commit: 703b2860a49577629e7b3ef461d8a61292e79c88
Parents: f8713f8
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:40:43 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/703b2860/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 6ec3503..c861654 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -253,7 +253,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/49] hadoop git commit: HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. Contributed by CR Hota

2018-11-12 Thread brahma
HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. 
Contributed by CR Hota


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7b63bac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7b63bac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7b63bac

Branch: refs/heads/HDFS-13891
Commit: e7b63baca1e10b28d8b4462fd80537b871951aa3
Parents: a67642c
Author: Brahma Reddy Battula 
Authored: Tue Nov 13 12:45:13 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 12:45:44 2018 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7b63bac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index d73fd45..c4d3239 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -337,9 +337,22 @@ public class NamenodeWebHdfsMethods {
 throw new IOException("No active nodes contain this block");
   }
 
-  private Token<? extends TokenIdentifier> generateDelegationToken(
-  final NameNode namenode, final UserGroupInformation ugi,
+  public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
+  throws IOException {
+ClientProtocol cp = getRpcClientProtocol();
+return cp.renewDelegationToken(token);
+  }
+
+  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
+  throws IOException {
+ClientProtocol cp = getRpcClientProtocol();
+cp.cancelDelegationToken(token);
+  }
+
+  public Token<? extends TokenIdentifier> generateDelegationToken(
+  final UserGroupInformation ugi,
   final String renewer) throws IOException {
+final NameNode namenode = (NameNode)context.getAttribute("name.node");
 final Credentials c = DelegationTokenSecretManager.createCredentials(
 namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
 if (c == null) {
@@ -384,7 +397,7 @@ public class NamenodeWebHdfsMethods {
 } else {
   //generate a token
  final Token<? extends TokenIdentifier> t = generateDelegationToken(
-  namenode, ugi, null);
+  ugi, null);
   delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
 }
 
@@ -705,7 +718,7 @@ public class NamenodeWebHdfsMethods {
   validateOpParams(op, delegationTokenArgument);
  final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
   token.decodeFromUrlString(delegationTokenArgument.getValue());
-  final long expiryTime = cp.renewDelegationToken(token);
+  final long expiryTime = renewDelegationToken(token);
   final String js = JsonUtil.toJsonString("long", expiryTime);
   return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
 }
@@ -714,7 +727,7 @@ public class NamenodeWebHdfsMethods {
   validateOpParams(op, delegationTokenArgument);
  final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
   token.decodeFromUrlString(delegationTokenArgument.getValue());
-  cp.cancelDelegationToken(token);
+  cancelDelegationToken(token);
   return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
 }
 case MODIFYACLENTRIES: {
@@ -1138,9 +1151,8 @@ public class NamenodeWebHdfsMethods {
 throw new IllegalArgumentException(delegation.getName()
 + " parameter is not null.");
   }
-  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final Token<? extends TokenIdentifier> token = generateDelegationToken(
-  namenode, ugi, renewer.getValue());
+  ugi, renewer.getValue());
 
   final String setServiceName = tokenService.getValue();
   final String setKind = tokenKind.getValue();
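
The point of turning these into overridable public instance methods is that a
subclass can reroute token handling. A purely hypothetical sketch (the class
name and the security-manager call are illustrations, not part of this
patch):

    public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
      @Override
      public Token<? extends TokenIdentifier> generateDelegationToken(
          UserGroupInformation ugi, String renewer) throws IOException {
        // issue the token from the router's own secret manager instead of
        // a single NameNode's DelegationTokenSecretManager
        return routerSecurityManager.getDelegationToken(ugi, renewer);
      }
    }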


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/49] hadoop git commit: HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz Wo Nicholas Sze and Mukul Kumar Singh.

2018-11-12 Thread brahma
HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz 
Wo Nicholas Sze and Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31614bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31614bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31614bcc

Branch: refs/heads/HDFS-13891
Commit: 31614bcc7cda614c45769aa779a839b25c375db2
Parents: 8d99648
Author: Shashikant Banerjee 
Authored: Fri Nov 9 00:05:45 2018 +0530
Committer: Shashikant Banerjee 
Committed: Fri Nov 9 00:05:45 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  8 
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 10 
 .../common/src/main/resources/ozone-default.xml | 15 ++
 .../server/ratis/ContainerStateMachine.java | 48 +---
 .../server/ratis/XceiverServerRatis.java| 11 +
 hadoop-hdds/pom.xml |  2 +-
 hadoop-ozone/pom.xml|  2 +-
 7 files changed, 67 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 38eec61..cedcc43 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -85,6 +85,14 @@ public final class ScmConfigKeys {
   public static final TimeDuration
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
   TimeDuration.valueOf(10, TimeUnit.SECONDS);
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  "dfs.container.ratis.statemachinedata.sync.retries";
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  "dfs.container.ratis.log.queue.size";
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 54b1cf8..9776817 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -261,6 +261,16 @@ public final class OzoneConfigKeys {
   public static final TimeDuration
   DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
   ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE;
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT;
   public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
   ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
   public static final TimeDuration
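
A sketch of reading the two new knobs; per the defaults above, -1 means
WriteStateMachineData is retried indefinitely:

    OzoneConfiguration conf = new OzoneConfiguration();
    int syncRetries = conf.getInt(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
        OzoneConfigKeys
            .DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT);
    int logQueueSize = conf.getInt(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE,
        OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT);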

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5ff60eb..2ffc2ab 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -60,6 +60,21 @@
 
   </property>
   <property>
+    <name>dfs.container.ratis.statemachinedata.sync.retries</name>
+    <value>-1</value>
+    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
+    <description>Number of times the WriteStateMachineData op will be tried
+      before failing, if this value is -1, then this retries indefinitely.
+    </description>
+  </property>
+  <property>
+   

[37/49] hadoop git commit: YARN-8997. [Submarine] Small refactors of modifier, condition check and redundant local variables. Contributed by Zhankun Tang.

2018-11-12 Thread brahma
YARN-8997. [Submarine] Small refactors of modifier, condition check and 
redundant local variables. Contributed by Zhankun Tang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e269c3fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e269c3fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e269c3fb

Branch: refs/heads/HDFS-13891
Commit: e269c3fb5a938e4359232628175569dbbd1a12c1
Parents: f944f33
Author: Giovanni Matteo Fumarola 
Authored: Mon Nov 12 15:06:43 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Mon Nov 12 15:06:43 2018 -0800

--
 .../org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java  | 4 +---
 .../submarine/runtimes/common/FSBasedSubmarineStorageImpl.java | 6 ++
 .../submarine/runtimes/yarnservice/YarnServiceJobMonitor.java  | 3 +--
 .../runtimes/yarnservice/YarnServiceJobSubmitter.java  | 5 +
 .../yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java  | 4 +---
 5 files changed, 6 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e269c3fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
index 546c6eb..05e830f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
@@ -156,9 +156,7 @@ public class CliUtils {
   return true;
 
 if (args.length == 1) {
-  if (args[0].equals("-h") || args[0].equals("--help")) {
-return true;
-  }
+  return args[0].equals("-h") || args[0].equals("--help");
 }
 
 return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e269c3fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
index 767fe78..1881510 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
@@ -73,8 +73,7 @@ public class FSBasedSubmarineStorageImpl extends 
SubmarineStorage {
   private Path getModelInfoPath(String modelName, String version, boolean 
create)
   throws IOException {
 Path modelDir = rdm.getModelDir(modelName, create);
-Path modelInfo = new Path(modelDir, version + ".info");
-return modelInfo;
+return new Path(modelDir, version + ".info");
   }
 
  private void serializeMap(FSDataOutputStream fos, Map<String, String> map)
@@ -98,7 +97,6 @@ public class FSBasedSubmarineStorageImpl extends 
SubmarineStorage {
 
   private Path getJobInfoPath(String jobName, boolean create) throws 
IOException {
 Path path = rdm.getJobStagingArea(jobName, create);
-Path fileName = new Path(path, "job.info");
-return fileName;
+return new Path(path, "job.info");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e269c3fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobMonitor.java
index 

[08/49] hadoop git commit: YARN-8880. Add configurations for pluggable plugin framework. Contributed by Zhankun Tang.

2018-11-12 Thread brahma
YARN-8880. Add configurations for pluggable plugin framework. Contributed by 
Zhankun Tang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8c72d7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8c72d7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8c72d7b

Branch: refs/heads/HDFS-13891
Commit: f8c72d7b3acca8285bbc3024f491c4586805be1e
Parents: c96cbe8
Author: Weiwei Yang 
Authored: Thu Nov 8 12:23:00 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Nov 8 12:23:00 2018 +0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  24 -
 .../src/main/resources/yarn-default.xml |  19 
 .../resourceplugin/ResourcePluginManager.java   |  34 +-
 .../TestResourcePluginManager.java  | 107 +--
 4 files changed, 170 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8c72d7b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ce38d27..e88d594 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1606,6 +1606,28 @@ public class YarnConfiguration extends Configuration {
   NM_PREFIX + "resource-plugins";
 
   /**
+   * This setting controls if pluggable device plugin framework is enabled.
+   * */
+  @Private
+  public static final String NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED =
+  NM_PREFIX + "pluggable-device-framework.enabled";
+
+  /**
+   * The pluggable device plugin framework is disabled by default
+   * */
+  @Private
+  public static final boolean DEFAULT_NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED =
+  false;
+
+  /**
+   * This setting contains vendor plugin class names for
+   * device plugin framework to load. Split by comma
+   * */
+  @Private
+  public static final String NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES =
+  NM_PREFIX + "pluggable-device-framework.device-classes";
+
+  /**
* Prefix for gpu configurations. Work in progress: This configuration
* parameter may be changed/removed in the future.
*/
@@ -1647,7 +1669,7 @@ public class YarnConfiguration extends Configuration {
   NVIDIA_DOCKER_V1;
 
   /**
-   * This setting controls end point of nvidia-docker-v1 plugin
+   * This setting controls end point of nvidia-docker-v1 plugin.
*/
   @Private
   public static final String NVIDIA_DOCKER_PLUGIN_V1_ENDPOINT =
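
A minimal sketch of enabling the framework programmatically; the plugin class
name is a placeholder for any implementation reachable on the NodeManager
classpath:

    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(
        YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED, true);
    conf.set(YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES,
        "com.example.MyVendorDevicePlugin");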

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8c72d7b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 1360e73..f5493bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3772,6 +3772,25 @@
 
   <property>
     <description>
+      This setting controls if pluggable device framework is enabled.
+      Disabled by default
+    </description>
+    <name>yarn.nodemanager.pluggable-device-framework.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      Configure vendor device plugin class name here. Comma separated.
+      The class must be found in CLASSPATH. The pluggable device framework will
+      load these classes.
+    </description>
+    <name>yarn.nodemanager.pluggable-device-framework.device-classes</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
   When yarn.nodemanager.resource.gpu.allowed-gpu-devices=auto specified,
   YARN NodeManager needs to run GPU discovery binary (now only support
   nvidia-smi) to get GPU-related information.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8c72d7b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 

[11/49] hadoop git commit: HDDS-737. Introduce Incremental Container Report. Contributed by Nanda kumar.

2018-11-12 Thread brahma
HDDS-737. Introduce Incremental Container Report.
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c80f753b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c80f753b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c80f753b

Branch: refs/heads/HDFS-13891
Commit: c80f753b0e95eb722a972f836c1e4d16fd823434
Parents: e1bbf7d
Author: Nanda kumar 
Authored: Thu Nov 8 18:33:38 2018 +0530
Committer: Nanda kumar 
Committed: Thu Nov 8 18:33:38 2018 +0530

--
 .../hdds/scm/container/ContainerInfo.java   |   8 +
 .../statemachine/DatanodeStateMachine.java  |  17 +-
 .../common/statemachine/StateContext.java   |  13 +-
 .../CloseContainerCommandHandler.java   |  73 +++--
 .../states/endpoint/HeartbeatEndpointTask.java  |  12 +-
 .../StorageContainerDatanodeProtocol.proto  |   2 +-
 .../scm/command/CommandStatusReportHandler.java |   4 -
 .../container/CloseContainerEventHandler.java   |  27 --
 .../scm/container/CloseContainerWatcher.java| 101 ---
 .../hdds/scm/container/ContainerManager.java|  11 -
 .../hdds/scm/container/ContainerReplica.java|  30 +-
 .../scm/container/ContainerReportHandler.java   | 202 +
 .../IncrementalContainerReportHandler.java  |  98 +++
 .../hdds/scm/container/SCMContainerManager.java | 107 ---
 .../scm/container/states/ContainerStateMap.java |   7 -
 .../hadoop/hdds/scm/events/SCMEvents.java   |  34 +--
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   |  37 ++-
 .../hadoop/hdds/scm/node/NewNodeHandler.java|  16 +-
 .../hadoop/hdds/scm/node/NodeManager.java   |  50 +---
 .../hadoop/hdds/scm/node/NodeReportHandler.java |   2 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |  69 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java|  66 +
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |   2 +-
 .../hdds/scm/node/states/NodeStateMap.java  |  67 ++---
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  17 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   6 +-
 .../scm/server/StorageContainerManager.java |  21 +-
 .../command/TestCommandStatusReportHandler.java |  14 -
 .../hdds/scm/container/MockNodeManager.java |  66 ++---
 .../container/TestContainerReportHandler.java   |  15 +-
 .../container/TestContainerStateManager.java|   3 +
 .../scm/container/TestSCMContainerManager.java  | 107 +--
 .../replication/TestReplicationManager.java |  12 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  |  24 +-
 .../container/TestCloseContainerWatcher.java| 289 ---
 .../ozone/container/common/TestEndPoint.java|   6 +-
 .../testutils/ReplicationNodeManagerMock.java   |  55 +---
 .../TestContainerStateManagerIntegration.java   |  11 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   4 +-
 .../hdds/scm/pipeline/TestPipelineClose.java|   4 +-
 .../commandhandler/TestBlockDeletion.java   |   6 +-
 41 files changed, 558 insertions(+), 1157 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 5a9484a..edfa0f9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -158,10 +158,18 @@ public class ContainerInfo implements 
Comparator<ContainerInfo>, Comparable<ContainerInfo> {
 return usedBytes;
   }
 
+  public void setUsedBytes(long value) {
+usedBytes = value;
+  }
+
   public long getNumberOfKeys() {
 return numberOfKeys;
   }
 
+  public void setNumberOfKeys(long value) {
+numberOfKeys = value;
+  }
+
   public long getDeleteTransactionId() {
 return deleteTransactionId;
   }
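
The new setters let report handlers fold datanode-reported usage back into
SCM's cached view of a container. A sketch of that update step; the replica
accessor names are assumptions, not the actual protobuf getters:

    ContainerInfo info = containerManager.getContainer(containerId);
    info.setUsedBytes(replicaReport.getUsedBytes());    // assumed accessor
    info.setNumberOfKeys(replicaReport.getKeyCount());  // assumed accessor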

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 4768cf8..12c33ff 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ 

[30/49] hadoop git commit: YARN-8987. Usability improvements node-attributes CLI. Contributed by Bibin A Chundatt.

2018-11-12 Thread brahma
YARN-8987. Usability improvements node-attributes CLI. Contributed by  Bibin A 
Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7411095
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7411095
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7411095

Branch: refs/heads/HDFS-13891
Commit: c741109522d2913b87638957c64b94dee6b51029
Parents: 9c32b50
Author: Weiwei Yang 
Authored: Mon Nov 12 18:18:23 2018 +0800
Committer: Weiwei Yang 
Committed: Mon Nov 12 18:18:23 2018 +0800

--
 .../yarn/client/cli/NodeAttributesCLI.java  |  3 +++
 .../server/resourcemanager/AdminService.java| 22 +++
 .../resourcemanager/TestRMAdminService.java | 28 
 3 files changed, 53 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7411095/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
index 13d5e24..d525087 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
@@ -205,6 +205,9 @@ public class NodeAttributesCLI extends Configured 
implements Tool {
   // print admin command detail
   printUsage(true, handler);
   return exitCode;
+} catch (YarnException e) {
+  errOut.println(e.toString());
+  return exitCode;
 } catch (Exception e) {
   errOut.println(e.toString());
   printUsage(true, handler);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7411095/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 601917a..880741a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -993,6 +993,7 @@ public class AdminService extends CompositeService 
implements
 nodeAttributesManager.addNodeAttributes(nodeAttributeMapping);
 break;
   case REMOVE:
+validateAttributesExists(nodesToAttributes);
 nodeAttributesManager.removeNodeAttributes(nodeAttributeMapping);
 break;
   case REPLACE:
@@ -1013,6 +1014,27 @@ public class AdminService extends CompositeService 
implements
 .newRecordInstance(NodesToAttributesMappingResponse.class);
   }
 
+  private void validateAttributesExists(
+  List<NodeToAttributes> nodesToAttributes) throws IOException {
+NodeAttributesManager nodeAttributesManager =
+rm.getRMContext().getNodeAttributesManager();
+for (NodeToAttributes nodeToAttrs : nodesToAttributes) {
+  String hostname = nodeToAttrs.getNode();
+  if (hostname == null) {
+continue;
+  }
+  Set<NodeAttribute> attrs =
+  nodeAttributesManager.getAttributesForNode(hostname).keySet();
+  List<NodeAttribute> attributes = nodeToAttrs.getNodeAttributes();
+  for (NodeAttribute nodeAttr : attributes) {
+if (!attrs.contains(nodeAttr)) {
+  throw new IOException("Node attribute [" + nodeAttr.getAttributeKey()
+  + "] doesn't exist on node " + nodeToAttrs.getNode());
+}
+  }
+}
+  }
+
   /**
* @param nodesToAttributesMapping input to be validated
* @param failOnUnknownNodes indicates to fail if the nodes are not 
available.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7411095/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 

[03/49] hadoop git commit: HDDS-809. Refactor SCMChillModeManager.

2018-11-12 Thread brahma
HDDS-809. Refactor SCMChillModeManager.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/addec292
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/addec292
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/addec292

Branch: refs/heads/HDFS-13891
Commit: addec29297e61a417f0ce711bd76b6db53d504eb
Parents: 482716e
Author: Yiqun Lin 
Authored: Wed Nov 7 13:53:28 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Nov 7 13:54:08 2018 +0800

--
 .../org/apache/hadoop/hdds/scm/ScmUtils.java|   2 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   2 +-
 .../hdds/scm/chillmode/ChillModeExitRule.java   |  32 ++
 .../hdds/scm/chillmode/ChillModePrecheck.java   |  68 
 .../scm/chillmode/ChillModeRestrictedOps.java   |  41 +++
 .../scm/chillmode/ContainerChillModeRule.java   | 112 +++
 .../scm/chillmode/DataNodeChillModeRule.java|  83 +
 .../hadoop/hdds/scm/chillmode/Precheck.java |  29 ++
 .../hdds/scm/chillmode/SCMChillModeManager.java | 153 +
 .../hadoop/hdds/scm/chillmode/package-info.java |  18 ++
 .../hdds/scm/server/ChillModePrecheck.java  |  69 
 .../apache/hadoop/hdds/scm/server/Precheck.java |  29 --
 .../hdds/scm/server/SCMChillModeManager.java| 319 ---
 .../scm/server/SCMClientProtocolServer.java |   1 +
 .../scm/server/StorageContainerManager.java |   1 +
 .../scm/chillmode/TestSCMChillModeManager.java  | 215 +
 .../scm/server/TestSCMChillModeManager.java | 215 -
 .../hadoop/ozone/om/TestScmChillMode.java   |   2 +-
 18 files changed, 756 insertions(+), 635 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/addec292/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
index 435f0a5..43b4452 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
+import org.apache.hadoop.hdds.scm.chillmode.Precheck;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.server.Precheck;
 
 /**
  * SCM utility class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/addec292/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index c878d97..85658b9 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.chillmode.ChillModePrecheck;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/addec292/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
new file mode 100644
index 000..d283dfe
--- /dev/null
+++ 
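The hunks above only swap imports over to the new chillmode package; for orientation, a minimal sketch of how callers consume the relocated pre-check types (the varargs ScmUtils.preCheck signature is assumed from these imports, not quoted from the patch):

  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
  import org.apache.hadoop.hdds.scm.ScmUtils;
  import org.apache.hadoop.hdds.scm.chillmode.ChillModePrecheck;
  import org.apache.hadoop.hdds.scm.exceptions.SCMException;

  class ChillModeGuardSketch {
    private final ChillModePrecheck chillModePrecheck = new ChillModePrecheck();

    void allocateBlockGuarded() throws SCMException {
      // Rejects the operation with an SCMException while SCM is in chill mode.
      ScmUtils.preCheck(ScmOps.allocateBlock, chillModePrecheck);
      // ... proceed with the restricted operation ...
    }
  }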

[45/49] hadoop git commit: HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' commands. Contributed by Ayush Saxena.

2018-11-12 Thread brahma
HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' 
commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af22e356
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af22e356
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af22e356

Branch: refs/heads/HDFS-13891
Commit: af22e356beac6867f0a3f8030287a4c7c924b587
Parents: e7b63ba
Author: Vinayakumar B 
Authored: Fri Oct 12 17:19:55 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:18:57 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 102 ++-
 .../federation/router/TestRouterAdminCLI.java   |  82 ---
 2 files changed, 122 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af22e356/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 1aefe4f..4a9cc7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -151,17 +151,7 @@ public class RouterAdmin extends Configured implements 
Tool {
* @param arg List of command line parameters.
*/
   private void validateMax(String[] arg) {
-if (arg[0].equals("-rm")) {
-  if (arg.length > 2) {
-throw new IllegalArgumentException(
-"Too many arguments, Max=1 argument allowed");
-  }
-} else if (arg[0].equals("-ls")) {
-  if (arg.length > 2) {
-throw new IllegalArgumentException(
-"Too many arguments, Max=1 argument allowed");
-  }
-} else if (arg[0].equals("-clrQuota")) {
+if (arg[0].equals("-ls")) {
   if (arg.length > 2) {
 throw new IllegalArgumentException(
 "Too many arguments, Max=1 argument allowed");
@@ -183,63 +173,63 @@ public class RouterAdmin extends Configured implements 
Tool {
 }
   }
 
-  @Override
-  public int run(String[] argv) throws Exception {
-if (argv.length < 1) {
-  System.err.println("Not enough parameters specified");
-  printUsage();
-  return -1;
-}
-
-int exitCode = -1;
-int i = 0;
-String cmd = argv[i++];
-
-// Verify that we have enough command line parameters
+  /**
+   * Validates the minimum number of arguments for a command.
+   * @param argv List of command line parameters.
+   * @return true if the number of arguments is valid for the command, false otherwise.
+   */
+  private boolean validateMin(String[] argv) {
+String cmd = argv[0];
 if ("-add".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-update".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-rm".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-setQuota".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-clrQuota".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-safemode".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-nameservice".equals(cmd)) {
   if (argv.length < 3) {
-System.err.println("Not enough parameters specificed for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 }
+return true;
+  }
+
+  @Override
+  public int run(String[] argv) throws Exception {
+if (argv.length < 1) {
+  System.err.println("Not enough parameters specified");
+   
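A condensed sketch of the flow this refactor enables: validate once up front, then let the multi-path commands loop over every path argument (the loop body is illustrative; removeMount is the pre-existing helper, its exact name assumed here):

  if (!validateMin(argv)) {
    System.err.println("Not enough parameters specified for cmd " + cmd);
    printUsage(cmd);
    return exitCode;
  }
  if ("-rm".equals(cmd)) {
    // Multiple mount points per invocation, e.g. -rm /mnt1 /mnt2
    for (int j = 1; j < argv.length; j++) {
      if (removeMount(argv[j])) {
        System.out.println("Successfully removed mount point " + argv[j]);
      }
    }
  }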

[25/49] hadoop git commit: YARN-9002. Improve keytab loading for YARN Service. Contributed by Gour Saha

2018-11-12 Thread brahma
YARN-9002.  Improve keytab loading for YARN Service.
Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26642487
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26642487
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26642487

Branch: refs/heads/HDFS-13891
Commit: 2664248797365761089a86d5bd59aa9ac3ebcc28
Parents: 298d250
Author: Eric Yang 
Authored: Sat Nov 10 01:52:19 2018 -0500
Committer: Eric Yang 
Committed: Sat Nov 10 01:52:19 2018 -0500

--
 .../yarn/service/client/ServiceClient.java  | 38 
 .../exceptions/RestApiErrorMessages.java|  2 --
 .../yarn/service/utils/ServiceApiUtil.java  | 17 -
 .../yarn/service/utils/TestServiceApiUtil.java  | 25 +++--
 4 files changed, 19 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26642487/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 91d6367..1158e44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1392,31 +1392,21 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
   throw new YarnException(e);
 }
 
-if (keytabURI.getScheme() != null) {
-  switch (keytabURI.getScheme()) {
-  case "hdfs":
-Path keytabOnhdfs = new Path(keytabURI);
-if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
-  LOG.warn(service.getName() + "'s keytab (principalName = "
-  + principalName + ") doesn't exist at: " + keytabOnhdfs);
-  return;
-}
-LocalResource keytabRes = fileSystem.createAmResource(keytabOnhdfs,
-LocalResourceType.FILE);
-localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
-service.getName()), keytabRes);
-LOG.info("Adding " + service.getName() + "'s keytab for "
-+ "localization, uri = " + keytabOnhdfs);
-break;
-  case "file":
-LOG.info("Using a keytab from localhost: " + keytabURI);
-break;
-  default:
-LOG.warn("Unsupported keytab URI scheme " + keytabURI);
-break;
-  }
+if ("file".equals(keytabURI.getScheme())) {
+  LOG.info("Using a keytab from localhost: " + keytabURI);
 } else {
-  LOG.warn("Unsupported keytab URI scheme " + keytabURI);
+  Path keytabOnhdfs = new Path(keytabURI);
+  if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
+LOG.warn(service.getName() + "'s keytab (principalName = "
++ principalName + ") doesn't exist at: " + keytabOnhdfs);
+return;
+  }
+  LocalResource keytabRes = fileSystem.createAmResource(keytabOnhdfs,
+  LocalResourceType.FILE);
+  localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
+  service.getName()), keytabRes);
+  LOG.info("Adding " + service.getName() + "'s keytab for "
+  + "localization, uri = " + keytabOnhdfs);
 }
   }
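One behavioral note on the simplified branch above: a keytab URI with no scheme now takes the non-"file" path and is resolved against the default filesystem, so only an explicit file:// URI skips localization. In outline:

  URI keytabURI = URI.create("hdfs:///user/app/app.keytab");
  if ("file".equals(keytabURI.getScheme())) {
    // Keytab is expected to be pre-installed on every host; nothing to localize.
  } else {
    // "hdfs", another remote scheme, or no scheme at all:
    // resolved against fs.defaultFS and localized for the AM.
    Path keytabOnhdfs = new Path(keytabURI);
  }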
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26642487/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 8f831ee..57c6449 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 

[23/49] hadoop git commit: HDDS-733. Create container if not exist, as part of chunk write. Contributed by Lokesh Jain.

2018-11-12 Thread brahma
HDDS-733. Create container if not exist, as part of chunk write.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fe50b49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fe50b49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fe50b49

Branch: refs/heads/HDFS-13891
Commit: 9fe50b4991a3084181b655f9836eb2ab232580a6
Parents: a736b5d
Author: Nanda kumar 
Authored: Sat Nov 10 00:07:26 2018 +0530
Committer: Nanda kumar 
Committed: Sat Nov 10 00:08:17 2018 +0530

--
 .../scm/client/ContainerOperationClient.java| 10 --
 .../hdds/scm/container/ContainerInfo.java   | 10 +-
 .../common/helpers/AllocatedBlock.java  | 19 +---
 ...kLocationProtocolClientSideTranslatorPB.java |  3 +-
 ...kLocationProtocolServerSideTranslatorPB.java |  1 -
 .../main/proto/ScmBlockLocationProtocol.proto   |  3 +-
 .../container/common/impl/HddsDispatcher.java   | 73 ++-
 .../CloseContainerCommandHandler.java   | 21 +++--
 .../common/impl/TestHddsDispatcher.java | 60 
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 99 +---
 .../scm/chillmode/ContainerChillModeRule.java   |  9 +-
 .../container/CloseContainerEventHandler.java   | 13 ---
 .../scm/container/ContainerStateManager.java| 46 +++--
 .../hdds/scm/container/SCMContainerManager.java | 85 +
 .../scm/server/SCMClientProtocolServer.java | 28 ++
 .../scm/chillmode/TestSCMChillModeManager.java  | 14 +--
 .../TestCloseContainerEventHandler.java | 15 ---
 .../scm/container/TestSCMContainerManager.java  | 28 --
 .../hdds/scm/node/TestDeadNodeHandler.java  | 20 
 .../ozone/client/io/ChunkGroupOutputStream.java | 33 +--
 .../ozone/om/helpers/OmKeyLocationInfo.java | 21 +
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 -
 .../TestContainerStateManagerIntegration.java   | 83 +++-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  4 -
 .../hdds/scm/pipeline/TestPipelineClose.java|  8 --
 .../rpc/TestCloseContainerHandlingByClient.java | 52 +++---
 .../rpc/TestContainerStateMachineFailures.java  |  6 +-
 .../ozone/container/ContainerTestHelper.java| 16 
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  2 -
 .../ozone/om/ScmBlockLocationTestIngClient.java |  3 +-
 30 files changed, 294 insertions(+), 492 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 8c96164..b9f38fe 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -117,17 +117,7 @@ public class ContainerOperationClient implements ScmClient 
{
   public void createContainer(XceiverClientSpi client,
   long containerId) throws IOException {
 String traceID = UUID.randomUUID().toString();
-storageContainerLocationClient.notifyObjectStageChange(
-ObjectStageChangeRequestProto.Type.container,
-containerId,
-ObjectStageChangeRequestProto.Op.create,
-ObjectStageChangeRequestProto.Stage.begin);
 ContainerProtocolCalls.createContainer(client, containerId, traceID);
-storageContainerLocationClient.notifyObjectStageChange(
-ObjectStageChangeRequestProto.Type.container,
-containerId,
-ObjectStageChangeRequestProto.Op.create,
-ObjectStageChangeRequestProto.Stage.complete);
 
 // Let us log this info after we let SCM know that we have completed the
 // creation state.
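The datanode half of this change (in HddsDispatcher, later in the patch) is what makes the begin/complete stage notifications above unnecessary: a chunk write that arrives for a missing container now creates the container implicitly. A hedged sketch of that idea, not the committed dispatcher code (createContainer here is a hypothetical helper):

  if (container == null && cmdType == ContainerProtos.Type.WriteChunk) {
    createContainer(containerID);
    container = containerSet.getContainer(containerID);
  }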

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index edfa0f9..1edd973 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -445,13 +445,11 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
 
   /**
* Check if a container is in open state, this will check if the
-   * container is either 

[04/49] hadoop git commit: HADOOP-15907. Add missing maven modules in BUILDING.txt. Contributed by Wanqiang Ji.

2018-11-12 Thread brahma
HADOOP-15907. Add missing maven modules in BUILDING.txt. Contributed by
Wanqiang Ji.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6444f1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6444f1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6444f1c

Branch: refs/heads/HDFS-13891
Commit: e6444f1c640cda86e1c97fbfebf68de92a162c95
Parents: addec29
Author: Weiwei Yang 
Authored: Wed Nov 7 16:45:16 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Nov 7 16:45:16 2018 +0800

--
 BUILDING.txt | 30 +++---
 1 file changed, 19 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6444f1c/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9727004..d35e3af 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -93,17 +93,25 @@ Optional packages:
 
--
 Maven main modules:
 
-  hadoop                            (Main Hadoop project)
-         - hadoop-project           (Parent POM for all Hadoop Maven modules.)
-                                    (All plugins & dependencies versions are defined here.)
-         - hadoop-project-dist      (Parent POM for modules that generate distributions.)
-         - hadoop-annotations       (Generates the Hadoop doclet used to generated the Javadocs)
-         - hadoop-assemblies        (Maven assemblies used by the different modules)
-         - hadoop-common-project    (Hadoop Common)
-         - hadoop-hdfs-project      (Hadoop HDFS)
-         - hadoop-mapreduce-project (Hadoop MapReduce)
-         - hadoop-tools             (Hadoop tools like Streaming, Distcp, etc.)
-         - hadoop-dist              (Hadoop distribution assembler)
+  hadoop                                (Main Hadoop project)
+         - hadoop-project               (Parent POM for all Hadoop Maven modules.)
+                                        (All plugins & dependencies versions are defined here.)
+         - hadoop-project-dist          (Parent POM for modules that generate distributions.)
+         - hadoop-annotations           (Generates the Hadoop doclet used to generated the Javadocs)
+         - hadoop-assemblies            (Maven assemblies used by the different modules)
+         - hadoop-maven-plugins         (Maven plugins used in project)
+         - hadoop-build-tools           (Build tools like checkstyle, etc.)
+         - hadoop-common-project        (Hadoop Common)
+         - hadoop-hdfs-project          (Hadoop HDFS)
+         - hadoop-yarn-project          (Hadoop YARN)
+         - hadoop-mapreduce-project     (Hadoop MapReduce)
+         - hadoop-ozone                 (Hadoop Ozone)
+         - hadoop-hdds                  (Hadoop Distributed Data Store)
+         - hadoop-tools                 (Hadoop tools like Streaming, Distcp, etc.)
+         - hadoop-dist                  (Hadoop distribution assembler)
+         - hadoop-client-modules        (Hadoop client modules)
+         - hadoop-minicluster           (Hadoop minicluster artifacts)
+         - hadoop-cloud-storage-project (Generates artifacts to access cloud storage like aws, azure, etc.)
 
 
--
 Where to run Maven from?





[09/49] hadoop git commit: YARN-8988. Reduce the verbose log on RM heartbeat path when distributed node-attributes is enabled. Contributed by Tao Yang.

2018-11-12 Thread brahma
YARN-8988. Reduce the verbose log on RM heartbeat path when distributed 
node-attributes is enabled. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1bbf7dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1bbf7dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1bbf7dc

Branch: refs/heads/HDFS-13891
Commit: e1bbf7dcdfc30a61a2b10bef09c59ff17d290488
Parents: f8c72d7
Author: Weiwei Yang 
Authored: Thu Nov 8 17:47:18 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Nov 8 17:47:18 2018 +0800

--
 .../resourcemanager/nodelabels/NodeAttributesManagerImpl.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1bbf7dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
index e524788..83c5983 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
@@ -221,8 +221,10 @@ public class NodeAttributesManagerImpl extends 
NodeAttributesManager {
 
   // Notify RM
   if (rmContext != null && rmContext.getDispatcher() != null) {
-LOG.info("Updated NodeAttribute event to RM:" + newNodeToAttributesMap
-.values());
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Updated NodeAttribute event to RM:"
+  + newNodeToAttributesMap.values());
+}
 rmContext.getDispatcher().getEventHandler().handle(
 new NodeAttributesUpdateSchedulerEvent(newNodeToAttributesMap));
   }
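The isDebugEnabled() guard keeps the heartbeat path from concatenating the attribute map into a log string (and from calling values() at all) when debug logging is off. SLF4J's parameterized form defers the string building the same way, though the argument expression is still evaluated:

  LOG.debug("Updated NodeAttribute event to RM: {}",
      newNodeToAttributesMap.values());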





[35/49] hadoop git commit: YARN-8776. Implement Container Exec feature in LinuxContainerExecutor. Contributed by Eric Yang

2018-11-12 Thread brahma
YARN-8776. Implement Container Exec feature in LinuxContainerExecutor. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f9c4f32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f9c4f32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f9c4f32

Branch: refs/heads/HDFS-13891
Commit: 1f9c4f32e842529be5980e395587f135452372bb
Parents: 18fe65d
Author: Billie Rinaldi 
Authored: Mon Nov 12 10:41:45 2018 -0800
Committer: Billie Rinaldi 
Committed: Mon Nov 12 10:42:30 2018 -0800

--
 .../server/nodemanager/ContainerExecutor.java   |  2 +-
 .../nodemanager/LinuxContainerExecutor.java | 31 +-
 .../linux/privileged/PrivilegedOperation.java   |  1 +
 .../privileged/PrivilegedOperationExecutor.java | 57 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |  8 +++
 .../DelegatingLinuxContainerRuntime.java| 10 
 .../runtime/DockerLinuxContainerRuntime.java| 45 ++
 .../linux/runtime/docker/DockerExecCommand.java | 62 
 .../runtime/ContainerRuntime.java   | 14 -
 .../executor/ContainerExecContext.java  | 11 ++--
 .../webapp/ContainerShellWebSocket.java | 49 
 .../server/nodemanager/webapp/WebServer.java|  1 +
 .../impl/container-executor.c   |  9 ++-
 .../nodemanager/TestContainerExecutor.java  |  3 +-
 .../nodemanager/TestLinuxContainerExecutor.java |  4 +-
 .../runtime/MockLinuxContainerRuntime.java  |  9 +++
 .../TestContainersMonitorResourceChange.java|  2 +
 17 files changed, 275 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9c4f32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 6024dbf..77b7859 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -38,7 +38,6 @@ import 
java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,6 +58,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9c4f32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 0282f58..db2fed9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -62,15 +62,10 @@ import 

[14/49] hadoop git commit: HDFS-14039. ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6, 3). Contributed by Kitti Nanasi.

2018-11-12 Thread brahma
HDFS-14039. ec -listPolicies doesn't show correct state for the default policy 
when the default is not RS(6,3). Contributed by Kitti Nanasi.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d99648c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d99648c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d99648c

Branch: refs/heads/HDFS-13891
Commit: 8d99648c203004045a9339ad27258092969145d6
Parents: 724c150
Author: Kitti Nanasi 
Authored: Thu Nov 8 10:00:09 2018 -0800
Committer: Xiao Chen 
Committed: Thu Nov 8 10:01:19 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 119 ++-
 .../server/namenode/FSImageFormatProtobuf.java  |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/namenode/TestEnabledECPolicies.java  | 103 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  42 +--
 .../server/namenode/TestNamenodeRetryCache.java |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   2 +-
 8 files changed, 231 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d99648c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index d2bf3af..57fa958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -81,6 +82,15 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyInfo[] allPolicies;
 
   /**
+   * All policies in the state as it will be persisted in the fsimage.
+   *
+   * The difference between persisted policies and all policies is that
+   * if a default policy is only enabled at startup,
+   * it will appear as disabled in the persisted policy list and in the 
fsimage.
+   */
+  private Map<Byte, ErasureCodingPolicyInfo> allPersistedPolicies;
+
+  /**
* All enabled policies sorted by name for fast querying, including built-in
* policy, user defined policy.
*/
@@ -90,6 +100,7 @@ public final class ErasureCodingPolicyManager {
*/
   private ErasureCodingPolicy[] enabledPolicies;
 
+  private String defaultPolicyName;
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -102,14 +113,11 @@ public final class ErasureCodingPolicyManager {
 
   private ErasureCodingPolicyManager() {}
 
-  public void init(Configuration conf) {
-// Load erasure coding default policy
-final String defaultPolicyName = conf.getTrimmed(
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+  public void init(Configuration conf) throws IOException {
 this.policiesByName = new TreeMap<>();
 this.policiesByID = new TreeMap<>();
 this.enabledPoliciesByName = new TreeMap<>();
+this.allPersistedPolicies = new TreeMap<>();
 
 /**
  * TODO: load user defined EC policy from fsImage HDFS-7859
@@ -125,31 +133,12 @@ public final class ErasureCodingPolicyManager {
   final ErasureCodingPolicyInfo info = new ErasureCodingPolicyInfo(policy);
   policiesByName.put(policy.getName(), info);
   policiesByID.put(policy.getId(), info);
+  allPersistedPolicies.put(policy.getId(),
+  new ErasureCodingPolicyInfo(policy));
 }
 
-if (!defaultPolicyName.isEmpty()) {
-  final ErasureCodingPolicyInfo info =
-  policiesByName.get(defaultPolicyName);
-  if (info == null) {
-String names = policiesByName.values()
-.stream().map((pi) -> pi.getPolicy().getName())
-.collect(Collectors.joining(", "));
-String msg = String.format("EC policy '%s' specified at %s is not a "
-+ "valid policy. Please choose from list of available "
-+ "policies: [%s]",
-defaultPolicyName,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-names);
-throw new HadoopIllegalArgumentException(msg);
-  }
-  
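A sketch of the persistence rule that new javadoc describes; isDefaultOnlyEnabled is a hypothetical helper and the setState call is assumed, so this is illustrative rather than the committed code:

  ErasureCodingPolicyInfo persisted = new ErasureCodingPolicyInfo(policy);
  if (isDefaultOnlyEnabled(policy)) {
    // Enabled purely via the startup default: kept DISABLED in the persisted
    // view so the fsimage never records a state the user did not set.
    persisted.setState(ErasureCodingPolicyState.DISABLED);
  }
  allPersistedPolicies.put(policy.getId(), persisted);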

[40/49] hadoop git commit: HDDS-831. TestOzoneShell in integration-test is flaky. Contributed by Nanda kumar.

2018-11-12 Thread brahma
HDDS-831. TestOzoneShell in integration-test is flaky. Contributed by Nanda 
kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8713f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8713f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8713f8a

Branch: refs/heads/HDFS-13891
Commit: f8713f8adea9d69330933a2cde594ed11ed9520c
Parents: 4c465f5
Author: Yiqun Lin 
Authored: Tue Nov 13 10:38:27 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Nov 13 10:38:27 2018 +0800

--
 .../test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8713f8a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 1900024..bd05b92 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -266,7 +266,7 @@ public class TestOzoneShell {
*/
   @Test
   public void testCreateVolumeWithoutUser() throws Exception {
-String volumeName = "volume" + RandomStringUtils.randomNumeric(1);
+String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
 String[] args = new String[] {"volume", "create", url + "/" + volumeName,
 "--root"};
 





[26/49] hadoop git commit: YARN-8933. [AMRMProxy] Fix potential empty fields in allocation response, move SubClusterTimeout to FederationInterceptor. Contributed by Botong Huang.

2018-11-12 Thread brahma
YARN-8933. [AMRMProxy] Fix potential empty fields in allocation response, move 
SubClusterTimeout to FederationInterceptor. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5ec85d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5ec85d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5ec85d9

Branch: refs/heads/HDFS-13891
Commit: b5ec85d96615e8214c14b57f8980a1dee6197ffa
Parents: 2664248
Author: Botong Huang 
Authored: Sun Nov 11 11:12:53 2018 -0800
Committer: Botong Huang 
Committed: Sun Nov 11 11:12:53 2018 -0800

--
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |   4 +-
 .../amrmproxy/FederationAMRMProxyPolicy.java|   8 +-
 .../policies/amrmproxy/HomeAMRMProxyPolicy.java |   5 +-
 .../LocalityMulticastAMRMProxyPolicy.java   |  52 ++--
 .../amrmproxy/RejectAMRMProxyPolicy.java|   4 +-
 .../policies/BaseFederationPoliciesTest.java|   5 +-
 .../TestBroadcastAMRMProxyFederationPolicy.java |  10 +-
 .../amrmproxy/TestHomeAMRMProxyPolicy.java  |  10 +-
 .../TestLocalityMulticastAMRMProxyPolicy.java   |  41 +++---
 .../amrmproxy/TestRejectAMRMProxyPolicy.java|   5 +-
 .../amrmproxy/FederationInterceptor.java| 129 ---
 .../amrmproxy/TestFederationInterceptor.java|  63 -
 12 files changed, 236 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ec85d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
index eb83baa..643bfa6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
@@ -21,6 +21,7 @@ package 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -49,7 +50,8 @@ public class BroadcastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 
   @Override
   public Map<SubClusterId, List<ResourceRequest>> splitResourceRequests(
-      List<ResourceRequest> resourceRequests) throws YarnException {
+      List<ResourceRequest> resourceRequests,
+      Set<SubClusterId> timedOutSubClusters) throws YarnException {
 
     Map<SubClusterId, SubClusterInfo> activeSubclusters =
         getActiveSubclusters();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ec85d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java
index 0541df4..3d39d72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java
@@ -19,6 +19,7 @@ package 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
 
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -39,15 +40,16 @@ public interface FederationAMRMProxyPolicy
*
* @param resourceRequests the list of {@link ResourceRequest}s from the AM 
to
*  be split
-   *
+   * @param timedOutSubClusters the set of sub-clusters that haven't had a
+   *  
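A minimal sketch of how an implementation can honor the new parameter (illustrative only, not the committed LocalityMulticast logic):

  @Override
  public Map<SubClusterId, List<ResourceRequest>> splitResourceRequests(
      List<ResourceRequest> resourceRequests,
      Set<SubClusterId> timedOutSubClusters) throws YarnException {
    Map<SubClusterId, List<ResourceRequest>> answer = new HashMap<>();
    for (SubClusterId id : getActiveSubclusters().keySet()) {
      if (!timedOutSubClusters.contains(id)) { // skip unresponsive sub-clusters
        answer.put(id, resourceRequests);
      }
    }
    return answer;
  }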

[47/49] hadoop git commit: HDFS-12284. RBF: Support for Kerberos authentication. Contributed by Sherwood Zheng and Inigo Goiri.

2018-11-12 Thread brahma
HDFS-12284. RBF: Support for Kerberos authentication. Contributed by Sherwood 
Zheng and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a47397cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a47397cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a47397cd

Branch: refs/heads/HDFS-13891
Commit: a47397cd863021481a1c04ea4e7cf328e5b5c291
Parents: 832b220
Author: Brahma Reddy Battula 
Authored: Wed Nov 7 07:33:37 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:18:57 2018 +0530

--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml |  10 ++
 .../server/federation/router/RBFConfigKeys.java |  11 ++
 .../hdfs/server/federation/router/Router.java   |  28 
 .../federation/router/RouterAdminServer.java|   7 +
 .../federation/router/RouterHttpServer.java |   5 +-
 .../federation/router/RouterRpcClient.java  |   9 +-
 .../federation/router/RouterRpcServer.java  |  12 ++
 .../src/main/resources/hdfs-rbf-default.xml |  47 ++
 .../fs/contract/router/RouterHDFSContract.java  |   9 +-
 .../fs/contract/router/SecurityConfUtil.java| 156 +++
 .../TestRouterHDFSContractAppendSecure.java |  46 ++
 .../TestRouterHDFSContractConcatSecure.java |  51 ++
 .../TestRouterHDFSContractCreateSecure.java |  48 ++
 .../TestRouterHDFSContractDeleteSecure.java |  46 ++
 ...stRouterHDFSContractGetFileStatusSecure.java |  47 ++
 .../TestRouterHDFSContractMkdirSecure.java  |  48 ++
 .../TestRouterHDFSContractOpenSecure.java   |  47 ++
 .../TestRouterHDFSContractRenameSecure.java |  48 ++
 ...stRouterHDFSContractRootDirectorySecure.java |  63 
 .../TestRouterHDFSContractSeekSecure.java   |  48 ++
 .../TestRouterHDFSContractSetTimesSecure.java   |  48 ++
 .../server/federation/MiniRouterDFSCluster.java |  58 ++-
 22 files changed, 879 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47397cd/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index 386eb41..9f515bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -35,6 +35,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   <dependencies>
     <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47397cd/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index bbd4250..fa474f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -242,4 +242,15 @@ public class RBFConfigKeys extends 
CommonConfigurationKeysPublic {
   FEDERATION_ROUTER_PREFIX + "quota-cache.update.interval";
   public static final long DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL_DEFAULT =
   6;
+
+  // HDFS Router security
+  public static final String DFS_ROUTER_KEYTAB_FILE_KEY =
+  FEDERATION_ROUTER_PREFIX + "keytab.file";
+  public static final String DFS_ROUTER_KERBEROS_PRINCIPAL_KEY =
+  FEDERATION_ROUTER_PREFIX + "kerberos.principal";
+  public static final String DFS_ROUTER_KERBEROS_PRINCIPAL_HOSTNAME_KEY =
+  FEDERATION_ROUTER_PREFIX + "kerberos.principal.hostname";
+
+  public static final String DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY 
=
+  FEDERATION_ROUTER_PREFIX + "kerberos.internal.spnego.principal";
 }
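For illustration, the new keys wired up programmatically; principal and keytab values are placeholders, and the same keys are documented in hdfs-rbf-default.xml by this patch:

  Configuration conf = new Configuration();
  conf.set(RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY,
      "/etc/security/keytabs/router.keytab");
  conf.set(RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY,
      "router/_HOST@EXAMPLE.COM");
  conf.set(RBFConfigKeys.DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      "HTTP/_HOST@EXAMPLE.COM");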

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a47397cd/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index 5ddc129..3288273 100644
--- 

[39/49] hadoop git commit: YARN-8761. Service AM support for decommissioning component instances. Contributed by Billie Rinaldi

2018-11-12 Thread brahma
YARN-8761. Service AM support for decommissioning component instances.
   Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c465f55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c465f55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c465f55

Branch: refs/heads/HDFS-13891
Commit: 4c465f5535054dad2ef0b18128fb115129f6939e
Parents: b6d4e19
Author: Eric Yang 
Authored: Mon Nov 12 19:53:10 2018 -0500
Committer: Eric Yang 
Committed: Mon Nov 12 19:53:10 2018 -0500

--
 .../yarn/service/client/ApiServiceClient.java   |  28 
 .../hadoop/yarn/service/webapp/ApiServer.java   |  40 +
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   5 +
 .../hadoop/yarn/service/ClientAMProtocol.java   |   6 +
 .../hadoop/yarn/service/ClientAMService.java|  20 +++
 .../yarn/service/api/records/Component.java |  26 
 .../yarn/service/client/ServiceClient.java  |  60 +++-
 .../yarn/service/component/Component.java   |  64 +++-
 .../yarn/service/component/ComponentEvent.java  |  10 ++
 .../service/component/ComponentEventType.java   |   3 +-
 .../component/instance/ComponentInstance.java   |  18 +--
 .../pb/client/ClientAMProtocolPBClientImpl.java |  14 ++
 .../service/ClientAMProtocolPBServiceImpl.java  |  13 ++
 .../yarn/service/utils/ServiceApiUtil.java  |  56 ++-
 .../src/main/proto/ClientAMProtocol.proto   |  11 +-
 .../hadoop/yarn/service/ServiceTestUtils.java   |   2 +-
 .../TestComponentDecommissionInstances.java | 147 +++
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  26 +++-
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  15 +-
 .../hadoop/yarn/client/api/AppAdminClient.java  |  12 ++
 .../src/site/markdown/YarnCommands.md   |   3 +
 .../markdown/yarn-service/YarnServiceAPI.md |   1 +
 22 files changed, 548 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c465f55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index 851acbd..38cfd11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -746,4 +746,32 @@ public class ApiServiceClient extends AppAdminClient {
 }
 return result;
   }
+
+  @Override
+  public int actionDecommissionInstances(String appName,
+      List<String> componentInstances) throws IOException, YarnException {
+int result = EXIT_SUCCESS;
+try {
+  Service service = new Service();
+  service.setName(appName);
+  for (String instance : componentInstances) {
+String componentName = ServiceApiUtil.parseComponentName(instance);
+Component component = service.getComponent(componentName);
+if (component == null) {
+  component = new Component();
+  component.setName(componentName);
+  service.addComponent(component);
+}
+component.addDecommissionedInstance(instance);
+  }
+  String buffer = jsonSerDeser.toJson(service);
+  ClientResponse response = getApiClient(getServicePath(appName))
+  .put(ClientResponse.class, buffer);
+  result = processResponse(response);
+} catch (Exception e) {
+  LOG.error("Fail to decommission instance: ", e);
+  result = EXIT_EXCEPTION_THROWN;
+}
+return result;
+  }
 }
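Client-side usage in miniature (a sketch: instance names follow the usual component-index scheme, and DEFAULT_TYPE is assumed to select the yarn-service implementation):

  AppAdminClient client = AppAdminClient
      .createAppAdminClient(AppAdminClient.DEFAULT_TYPE, conf);
  // Decommission two instances of the "worker" component of "my-service".
  client.actionDecommissionInstances("my-service",
      Arrays.asList("worker-0", "worker-3"));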

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c465f55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 

[16/49] hadoop git commit: HADOOP-15903. Allow HttpServer2 to discover resources in /static when symlinks are used. Contributed by Inigo Goiri.

2018-11-12 Thread brahma
HADOOP-15903. Allow HttpServer2 to discover resources in /static when symlinks 
are used. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89b49167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89b49167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89b49167

Branch: refs/heads/HDFS-13891
Commit: 89b49167a5a7b78cb25d2ee56ac011f344e9c9e4
Parents: 31614bc
Author: Giovanni Matteo Fumarola 
Authored: Thu Nov 8 14:52:24 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Thu Nov 8 14:52:24 2018 -0800

--
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b49167/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index a9c2319..598d3ee 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -84,6 +84,7 @@ import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.server.SessionManager;
 import org.eclipse.jetty.server.SslConnectionFactory;
+import org.eclipse.jetty.server.handler.AllowSymLinkAliasChecker;
 import org.eclipse.jetty.server.handler.ContextHandlerCollection;
 import org.eclipse.jetty.server.handler.HandlerCollection;
 import org.eclipse.jetty.server.handler.RequestLogHandler;
@@ -725,6 +726,7 @@ public final class HttpServer2 implements FilterContainer {
 asm.getSessionCookieConfig().setSecure(true);
   }
   logContext.setSessionHandler(handler);
+  logContext.addAliasCheck(new AllowSymLinkAliasChecker());
   setContextAttributes(logContext, conf);
   addNoCacheFilter(logContext);
   defaultContexts.put(logContext, true);
@@ -747,6 +749,7 @@ public final class HttpServer2 implements FilterContainer {
   asm.getSessionCookieConfig().setSecure(true);
 }
 staticContext.setSessionHandler(handler);
+staticContext.addAliasCheck(new AllowSymLinkAliasChecker());
 setContextAttributes(staticContext, conf);
 defaultContexts.put(staticContext, true);
   }
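Background, hedged: Jetty 9 treats a resource reached through a symlink as an alias and by default refuses to serve it, which is why /static and /logs return 404 when the Hadoop install directory sits behind a symlink. The same fix in a standalone Jetty context:

  ServletContextHandler ctx = new ServletContextHandler();
  ctx.setResourceBase("/opt/hadoop/current/share/webapps/static"); // may be a symlink
  ctx.addAliasCheck(new AllowSymLinkAliasChecker()); // serve aliased resources again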





[05/49] hadoop git commit: YARN-8866. Fix a parsing error for crossdomain.xml.

2018-11-12 Thread brahma
YARN-8866. Fix a parsing error for crossdomain.xml.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dc1f6db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dc1f6db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dc1f6db

Branch: refs/heads/HDFS-13891
Commit: 8dc1f6dbf712a65390a9a6859f62fec0481af31b
Parents: e6444f1
Author: Takanobu Asanuma 
Authored: Wed Nov 7 18:26:07 2018 +0900
Committer: Takanobu Asanuma 
Committed: Wed Nov 7 18:26:07 2018 +0900

--
 .../hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dc1f6db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
index 43a2ea6..a9597e9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
@@ -18,7 +18,12 @@
 * limitations under the License.
 -->
 
-<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">
+
+  
+  
+  ]>
 
   
 





[42/49] hadoop git commit: HADOOP-15912. start-build-env.sh still creates an invalid /etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. Contributed by Akira Ajisaka.

2018-11-12 Thread brahma
HADOOP-15912. start-build-env.sh still creates an invalid 
/etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. 
Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a67642c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a67642c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a67642c3

Branch: refs/heads/HDFS-13891
Commit: a67642c3776156ee941f12f9481160c729c56027
Parents: 703b286
Author: Takanobu Asanuma 
Authored: Tue Nov 13 13:57:07 2018 +0900
Committer: Takanobu Asanuma 
Committed: Tue Nov 13 13:57:07 2018 +0900

--
 start-build-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67642c3/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index bf6b411..e3d9f2f 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -61,7 +61,7 @@ docker build -t "hadoop-build-${USER_ID}" - <<UserSpecificDocker
-RUN echo -e "${USER_NAME}\tALL=NOPASSWD:\tALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
+RUN echo "${USER_NAME} ALL=NOPASSWD: ALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
 
 UserSpecificDocker





[21/49] hadoop git commit: HADOOP-15916. Upgrade Maven Surefire plugin to 3.0.0-M1.

2018-11-12 Thread brahma
HADOOP-15916. Upgrade Maven Surefire plugin to 3.0.0-M1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a736b5da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a736b5da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a736b5da

Branch: refs/heads/HDFS-13891
Commit: a736b5da15084e8eb93d2f68f8eccc506ff7bea7
Parents: 9dbb2b6
Author: Akira Ajisaka 
Authored: Sat Nov 10 00:24:56 2018 +0900
Committer: Akira Ajisaka 
Committed: Sat Nov 10 00:24:56 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a736b5da/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5d38167..c985d7b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -127,7 +127,7 @@
 
 
     <maven-surefire-plugin.argLine>-Xmx2048m -XX:+HeapDumpOnOutOfMemoryError</maven-surefire-plugin.argLine>
-    <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
+    <maven-surefire-plugin.version>3.0.0-M1</maven-surefire-plugin.version>
     <maven-surefire-report-plugin.version>${maven-surefire-plugin.version}</maven-surefire-report-plugin.version>
     <maven-failsafe-plugin.version>${maven-surefire-plugin.version}</maven-failsafe-plugin.version>
 





[27/49] hadoop git commit: YARN-8902. [CSI] Add volume manager that manages CSI volume lifecycle. Contributed by Weiwei Yang.

2018-11-12 Thread brahma
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e728444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
new file mode 100644
index 000..f275768
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.volume.csi.processor;
+
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.VolumeManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.lifecycle.Volume;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.lifecycle.VolumeImpl;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.provisioner.VolumeProvisioningResults;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.provisioner.VolumeProvisioningTask;
+import org.apache.hadoop.yarn.server.volume.csi.VolumeMetaData;
+import 
org.apache.hadoop.yarn.server.volume.csi.exception.InvalidVolumeException;
+import org.apache.hadoop.yarn.server.volume.csi.exception.VolumeException;
+import 
org.apache.hadoop.yarn.server.volume.csi.exception.VolumeProvisioningException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * AMS processor that handles volume resource requests.
+ *
+ */
+public class VolumeAMSProcessor implements ApplicationMasterServiceProcessor {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(VolumeAMSProcessor.class);
+
+  private ApplicationMasterServiceProcessor nextAMSProcessor;
+  private VolumeManager volumeManager;
+
+  @Override
+  public void init(ApplicationMasterServiceContext amsContext,
+      ApplicationMasterServiceProcessor nextProcessor) {
+    LOG.info("Initializing CSI volume processor");
+    this.nextAMSProcessor = nextProcessor;
+    this.volumeManager = ((RMContext) amsContext).getVolumeManager();
+  }
+
+  @Override
+  public void registerApplicationMaster(
+      ApplicationAttemptId applicationAttemptId,
+      RegisterApplicationMasterRequest request,
+      RegisterApplicationMasterResponse response)
+      throws IOException, YarnException {
+    this.nextAMSProcessor.registerApplicationMaster(applicationAttemptId,
+        request, response);
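
The diff is truncated here by the archive. For orientation, a minimal sketch of the chain pattern VolumeAMSProcessor follows: each processor keeps the next processor handed to init() and forwards every AMS call, adding its own behavior around the forward. The class name below is illustrative and not part of the patch; the signatures and imports are those of the ApplicationMasterServiceProcessor interface imported above.

// Illustrative pass-through processor in the same chain style (not in the patch).
public class PassThroughAMSProcessor
    implements ApplicationMasterServiceProcessor {

  private ApplicationMasterServiceProcessor nextAMSProcessor;

  @Override
  public void init(ApplicationMasterServiceContext amsContext,
      ApplicationMasterServiceProcessor nextProcessor) {
    // Keep a handle on the next processor; every call is forwarded to it.
    this.nextAMSProcessor = nextProcessor;
  }

  @Override
  public void registerApplicationMaster(
      ApplicationAttemptId applicationAttemptId,
      RegisterApplicationMasterRequest request,
      RegisterApplicationMasterResponse response)
      throws IOException, YarnException {
    nextAMSProcessor.registerApplicationMaster(
        applicationAttemptId, request, response);
  }

  @Override
  public void allocate(ApplicationAttemptId appAttemptId,
      AllocateRequest request, AllocateResponse response)
      throws YarnException {
    // A volume-aware processor would inspect request.getSchedulingRequests()
    // here and kick off provisioning before passing the call on.
    nextAMSProcessor.allocate(appAttemptId, request, response);
  }

  @Override
  public void finishApplicationMaster(
      ApplicationAttemptId applicationAttemptId,
      FinishApplicationMasterRequest request,
      FinishApplicationMasterResponse response) {
    nextAMSProcessor.finishApplicationMaster(
        applicationAttemptId, request, response);
  }
}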

[33/49] hadoop git commit: HDDS-576. Move ContainerWithPipeline creation to RPC endpoint. Contributed by Nanda kumar.

2018-11-12 Thread brahma
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fe65d7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index aada723..30e3536 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -102,10 +103,10 @@ public class TestCloseContainerByPipeline {
         .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = omKeyLocationInfo.getContainerID();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getContainerManager().getContainer(ContainerID.valueof(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainerManager().getContainerWithPipeline(
-            ContainerID.valueof(containerID))
-        .getPipeline();
+        .getPipelineManager().getPipeline(container.getPipelineID());
     List datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
@@ -158,10 +159,10 @@ public class TestCloseContainerByPipeline {
         .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = omKeyLocationInfo.getContainerID();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getContainerManager().getContainer(ContainerID.valueof(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainerManager().getContainerWithPipeline(
-            ContainerID.valueof(containerID))
-        .getPipeline();
+        .getPipelineManager().getPipeline(container.getPipelineID());
     List datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
@@ -216,10 +217,10 @@ public class TestCloseContainerByPipeline {
         .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = omKeyLocationInfo.getContainerID();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getContainerManager().getContainer(ContainerID.valueof(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainerManager().getContainerWithPipeline(
-            ContainerID.valueof(containerID))
-        .getPipeline();
+        .getPipelineManager().getPipeline(container.getPipelineID());
     List datanodes = pipeline.getNodes();
     Assert.assertEquals(3, datanodes.size());
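
Each of the three hunks above makes the same substitution: the single getContainerWithPipeline() lookup is split into a container lookup followed by a pipeline lookup through the PipelineManager, which is the point of moving ContainerWithPipeline creation to the RPC endpoint. A hedged sketch of the new two-step path; the helper class and the StorageContainerManager import are assumptions of this sketch, the accessors are those shown in the hunks:

import java.io.IOException;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

// Sketch only: container first, then pipeline resolved by its ID.
public final class PipelineLookup {
  private PipelineLookup() {
  }

  public static Pipeline lookup(StorageContainerManager scm, long containerID)
      throws IOException {
    ContainerInfo container = scm.getContainerManager()
        .getContainer(ContainerID.valueof(containerID));
    // ContainerWithPipeline is no longer materialized here; the pipeline is
    // fetched separately from the PipelineManager.
    return scm.getPipelineManager().getPipeline(container.getPipelineID());
  }
}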
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fe65d7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index f3ce899..9cf51d1 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -20,6 +20,7 @@ package 
org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -80,28 +81,30 @@ public class TestCloseContainerHandler {
 cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
   

[48/49] hadoop git commit: HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. Contributed by Akira Ajisaka.

2018-11-12 Thread brahma
HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. 
Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b94d1d44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b94d1d44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b94d1d44

Branch: refs/heads/HDFS-13891
Commit: b94d1d44740c35f1b2d0af7a71eb357aec58fa52
Parents: af22e35
Author: Yiqun Lin 
Authored: Tue Oct 23 14:34:29 2018 +0800
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:18:57 2018 +0530

--
 .../resolver/FileSubclusterResolver.java|  6 ++-
 .../federation/router/RouterClientProtocol.java | 30 ---
 .../router/RouterQuotaUpdateService.java|  9 ++--
 .../hdfs/server/federation/MockResolver.java| 17 +++---
 .../federation/router/TestRouterMountTable.java | 55 +++-
 .../router/TestRouterRpcMultiDestination.java   |  5 +-
 6 files changed, 97 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b94d1d44/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
index 5aa5ec9..6432bb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
@@ -61,8 +61,10 @@ public interface FileSubclusterResolver {
    * cache.
    *
    * @param path Path to get the mount points under.
-   * @return List of mount points present at this path or zero-length list if
-   * none are found.
+   * @return List of mount points present at this path. Return zero-length
+   * list if the path is a mount point but there are no mount points
+   * under the path. Return null if the path is not a mount point
+   * and there are no mount points under the path.
    * @throws IOException Throws exception if the data is not available.
    */
   List getMountPoints(String path) throws IOException;
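
The sharpened javadoc distinguishes two cases the old contract conflated. A short caller-side sketch of how the two return shapes should now be handled; the surrounding method and variables are assumed for illustration:

List<String> mounts = subclusterResolver.getMountPoints(path);
if (mounts == null) {
  // Not a mount point and nothing mounted below it: fall through to the
  // normal subcluster lookup for this path.
} else if (mounts.isEmpty()) {
  // The path itself is a mount point with no child mounts: it must still
  // surface as a directory (see the getMountPointStatus(src, 0, 0) hunk below).
} else {
  for (String child : mounts) {
    // One synthetic directory entry per child mount point.
  }
}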

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b94d1d44/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index ddbc014..de94eaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -718,6 +718,9 @@ public class RouterClientProtocol implements ClientProtocol {
           date = dates.get(src);
         }
         ret = getMountPointStatus(src, children.size(), date);
+      } else if (children != null) {
+        // The src is a mount point, but there are no files or directories
+        ret = getMountPointStatus(src, 0, 0);
       }
     }
 
@@ -1714,13 +1717,26 @@ public class RouterClientProtocol implements ClientProtocol {
     FsPermission permission = FsPermission.getDirDefault();
     String owner = this.superUser;
     String group = this.superGroup;
-    try {
-      // TODO support users, it should be the user for the pointed folder
-      UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
-      owner = ugi.getUserName();
-      group = ugi.getPrimaryGroupName();
-    } catch (IOException e) {
-      LOG.error("Cannot get the remote user: {}", e.getMessage());
+    if (subclusterResolver instanceof MountTableResolver) {
+      try {
+        MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
+        MountTable entry = mountTable.getMountPoint(name);
+        if (entry != null) {
+          permission = entry.getMode();
+          owner = entry.getOwnerName();
+          group = entry.getGroupName();
+        }
+      } catch (IOException e) {
+        LOG.error("Cannot get mount point: {}", e.getMessage());
+      }
+    } else {
+      try {
+        UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
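
The hunk is truncated by the archive just after the legacy branch reopens. Pieced together from the minus and plus lines above, the new ownership logic reads roughly as follows; this is a paraphrase, with the tail of the else branch reconstructed from the removed lines, not the verbatim patch:

FsPermission permission = FsPermission.getDirDefault();
String owner = this.superUser;
String group = this.superGroup;
if (subclusterResolver instanceof MountTableResolver) {
  try {
    MountTable entry =
        ((MountTableResolver) subclusterResolver).getMountPoint(name);
    if (entry != null) {
      // Prefer the mount table entry's own mode, owner and group.
      permission = entry.getMode();
      owner = entry.getOwnerName();
      group = entry.getGroupName();
    }
  } catch (IOException e) {
    LOG.error("Cannot get mount point: {}", e.getMessage());
  }
} else {
  // Legacy behavior, kept for non-mount-table resolvers: attribute the
  // mount point to the remote caller.
  try {
    UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
    owner = ugi.getUserName();
    group = ugi.getPrimaryGroupName();
  } catch (IOException e) {
    LOG.error("Cannot get the remote user: {}", e.getMessage());
  }
}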

[20/49] hadoop git commit: HADOOP-15812. ABFS: Improve AbfsRestOperationException format to ensure full msg can be displayed on console.

2018-11-12 Thread brahma
HADOOP-15812. ABFS: Improve AbfsRestOperationException format to ensure full 
msg can be displayed on console.

Author:Da Zhou 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dbb2b67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dbb2b67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dbb2b67

Branch: refs/heads/HDFS-13891
Commit: 9dbb2b67c6fa69a663c7c23fbb876bac4b6c09ce
Parents: 47194fe
Author: Da Zhou 
Authored: Fri Nov 9 11:06:23 2018 +
Committer: Steve Loughran 
Committed: Fri Nov 9 11:06:23 2018 +

--
 .../exceptions/AbfsRestOperationException.java  | 24 +--
 .../ITestAbfsRestOperationException.java| 75 
 2 files changed, 92 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbb2b67/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java
index 149f916..36f7589 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java
@@ -81,13 +81,23 @@ public class AbfsRestOperationException extends AzureBlobFileSystemException {
   }
 
   private static String formatMessage(final AbfsHttpOperation abfsHttpOperation) {
+    // HEAD request response doesn't have StorageErrorCode, StorageErrorMessage.
+    if (abfsHttpOperation.getMethod().equals("HEAD")) {
+      return String.format(
+          "Operation failed: \"%1$s\", %2$s, HEAD, %3$s",
+          abfsHttpOperation.getStatusDescription(),
+          abfsHttpOperation.getStatusCode(),
+          abfsHttpOperation.getUrl().toString());
+    }
+
     return String.format(
-        "%1$s %2$s%nStatusCode=%3$s%nStatusDescription=%4$s%nErrorCode=%5$s%nErrorMessage=%6$s",
-        abfsHttpOperation.getMethod(),
-        abfsHttpOperation.getUrl().toString(),
-        abfsHttpOperation.getStatusCode(),
-        abfsHttpOperation.getStatusDescription(),
-        abfsHttpOperation.getStorageErrorCode(),
-        abfsHttpOperation.getStorageErrorMessage());
+        "Operation failed: \"%1$s\", %2$s, %3$s, %4$s, %5$s, \"%6$s\"",
+        abfsHttpOperation.getStatusDescription(),
+        abfsHttpOperation.getStatusCode(),
+        abfsHttpOperation.getMethod(),
+        abfsHttpOperation.getUrl().toString(),
+        abfsHttpOperation.getStorageErrorCode(),
+        // Remove break line to ensure the request id and timestamp can be shown in console.
+        abfsHttpOperation.getStorageErrorMessage().replaceAll("\\n", " "));
   }
 }
\ No newline at end of file
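
With the new single-line template, a failed request renders on one console line, and the storage error message has its line breaks flattened so the embedded request id and timestamp stay visible. A small demo with made-up values; the inputs are hypothetical and only the template string comes from the patch:

public class AbfsMessageFormatDemo {
  public static void main(String[] args) {
    // Hypothetical inputs; mirrors the non-HEAD template above.
    String storageErrorMessage =
        "The specified path does not exist.\nRequestId:abc\nTime:2018-11-09T11:06:23";
    String msg = String.format(
        "Operation failed: \"%1$s\", %2$s, %3$s, %4$s, %5$s, \"%6$s\"",
        "Not Found", 404, "GET",
        "https://example.dfs.core.windows.net/fs/missing",
        "PathNotFound",
        storageErrorMessage.replaceAll("\\n", " "));
    System.out.println(msg);
    // Operation failed: "Not Found", 404, GET,
    // https://example.dfs.core.windows.net/fs/missing, PathNotFound,
    // "The specified path does not exist. RequestId:abc Time:2018-11-09T11:06:23"
  }
}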

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbb2b67/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
new file mode 100644
index 000..ff88b02
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+import org.junit.Assert;
+import 

[3/4] hadoop git commit: HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. Contributed by CR Hota

2018-11-12 Thread brahma
HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. 
Contributed by CR Hota

(cherry picked from commit e7b63baca1e10b28d8b4462fd80537b871951aa3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d72c1348
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d72c1348
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d72c1348

Branch: refs/heads/branch-3.1
Commit: d72c134889b4aa1b3fd33575ceee1f2339836523
Parents: 1959ca6
Author: Brahma Reddy Battula 
Authored: Tue Nov 13 12:45:13 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:11:39 2018 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72c1348/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index a8ab798..fa4bd37 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -334,9 +334,22 @@ public class NamenodeWebHdfsMethods {
     throw new IOException("No active nodes contain this block");
   }
 
-  private Token generateDelegationToken(
-      final NameNode namenode, final UserGroupInformation ugi,
+  public long renewDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    return cp.renewDelegationToken(token);
+  }
+
+  public void cancelDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    cp.cancelDelegationToken(token);
+  }
+
+  public Token generateDelegationToken(
+      final UserGroupInformation ugi,
       final String renewer) throws IOException {
+    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     if (c == null) {
@@ -381,7 +394,7 @@
     } else {
       //generate a token
       final Token t = generateDelegationToken(
-          namenode, ugi, null);
+          ugi, null);
       delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
     }
 
@@ -702,7 +715,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      final long expiryTime = cp.renewDelegationToken(token);
+      final long expiryTime = renewDelegationToken(token);
       final String js = JsonUtil.toJsonString("long", expiryTime);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -711,7 +724,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      cp.cancelDelegationToken(token);
+      cancelDelegationToken(token);
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MODIFYACLENTRIES: {
@@ -1123,9 +1136,8 @@
       throw new IllegalArgumentException(delegation.getName()
           + " parameter is not null.");
     }
-    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Token token = generateDelegationToken(
-        namenode, ugi, renewer.getValue());
+        ugi, renewer.getValue());
 
     final String setServiceName = tokenService.getValue();
     final String setKind = tokenKind.getValue();
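
Making these methods public (and resolving the NameNode inside generateDelegationToken) turns them into an extension seam: a WebHDFS frontend that is not backed by a single NameNode, such as the RBF Router on the HDFS-13891 branch this digest tracks, can subclass NamenodeWebHdfsMethods and override just the token plumbing. For reference, a minimal hedged sketch of the two ClientProtocol RPCs the new public methods wrap; the helper class is illustrative, and the token type parameter (stripped by the archive above) is assumed to be DelegationTokenIdentifier:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

// Illustrative helper (not in the patch): the calls a subclass can reroute.
public final class DelegationTokenOps {
  private DelegationTokenOps() {
  }

  public static long renew(ClientProtocol cp,
      Token<DelegationTokenIdentifier> token) throws IOException {
    return cp.renewDelegationToken(token);  // returns the new expiry time
  }

  public static void cancel(ClientProtocol cp,
      Token<DelegationTokenIdentifier> token) throws IOException {
    cp.cancelDelegationToken(token);
  }
}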


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/4] hadoop git commit: HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. Contributed by CR Hota

2018-11-12 Thread brahma
HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. 
Contributed by CR Hota

(cherry picked from commit e7b63baca1e10b28d8b4462fd80537b871951aa3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04bba915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04bba915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04bba915

Branch: refs/heads/branch-3.0
Commit: 04bba915855bd7a5e055c3349d3068188f4dc645
Parents: ff1cbb4
Author: Brahma Reddy Battula 
Authored: Tue Nov 13 12:45:13 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:12:56 2018 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04bba915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index e9f5628..238b529 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -333,9 +333,22 @@ public class NamenodeWebHdfsMethods {
     throw new IOException("No active nodes contain this block");
   }
 
-  private Token generateDelegationToken(
-      final NameNode namenode, final UserGroupInformation ugi,
+  public long renewDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    return cp.renewDelegationToken(token);
+  }
+
+  public void cancelDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    cp.cancelDelegationToken(token);
+  }
+
+  public Token generateDelegationToken(
+      final UserGroupInformation ugi,
       final String renewer) throws IOException {
+    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     if (c == null) {
@@ -380,7 +393,7 @@
     } else {
       //generate a token
       final Token t = generateDelegationToken(
-          namenode, ugi, null);
+          ugi, null);
       delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
     }
 
@@ -701,7 +714,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      final long expiryTime = cp.renewDelegationToken(token);
+      final long expiryTime = renewDelegationToken(token);
       final String js = JsonUtil.toJsonString("long", expiryTime);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -710,7 +723,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      cp.cancelDelegationToken(token);
+      cancelDelegationToken(token);
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MODIFYACLENTRIES: {
@@ -1120,9 +1133,8 @@
       throw new IllegalArgumentException(delegation.getName()
           + " parameter is not null.");
     }
-    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Token token = generateDelegationToken(
-        namenode, ugi, renewer.getValue());
+        ugi, renewer.getValue());
 
     final String setServiceName = tokenService.getValue();
     final String setKind = tokenKind.getValue();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/4] hadoop git commit: HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. Contributed by CR Hota

2018-11-12 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ff1cbb48c -> 04bba9158
  refs/heads/branch-3.1 1959ca6f2 -> d72c13488
  refs/heads/branch-3.2 359b1dd40 -> 5e8664126
  refs/heads/trunk a67642c37 -> e7b63baca


HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. 
Contributed by CR Hota


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7b63bac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7b63bac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7b63bac

Branch: refs/heads/trunk
Commit: e7b63baca1e10b28d8b4462fd80537b871951aa3
Parents: a67642c
Author: Brahma Reddy Battula 
Authored: Tue Nov 13 12:45:13 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 12:45:44 2018 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7b63bac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index d73fd45..c4d3239 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -337,9 +337,22 @@ public class NamenodeWebHdfsMethods {
     throw new IOException("No active nodes contain this block");
   }
 
-  private Token generateDelegationToken(
-      final NameNode namenode, final UserGroupInformation ugi,
+  public long renewDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    return cp.renewDelegationToken(token);
+  }
+
+  public void cancelDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    cp.cancelDelegationToken(token);
+  }
+
+  public Token generateDelegationToken(
+      final UserGroupInformation ugi,
       final String renewer) throws IOException {
+    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     if (c == null) {
@@ -384,7 +397,7 @@
     } else {
       //generate a token
       final Token t = generateDelegationToken(
-          namenode, ugi, null);
+          ugi, null);
       delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
     }
 
@@ -705,7 +718,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      final long expiryTime = cp.renewDelegationToken(token);
+      final long expiryTime = renewDelegationToken(token);
       final String js = JsonUtil.toJsonString("long", expiryTime);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -714,7 +727,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      cp.cancelDelegationToken(token);
+      cancelDelegationToken(token);
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MODIFYACLENTRIES: {
@@ -1138,9 +1151,8 @@
       throw new IllegalArgumentException(delegation.getName()
           + " parameter is not null.");
     }
-    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Token token = generateDelegationToken(
-        namenode, ugi, renewer.getValue());
+        ugi, renewer.getValue());
 
     final String setServiceName = tokenService.getValue();
     final String setKind = tokenKind.getValue();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/4] hadoop git commit: HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. Contributed by CR Hota

2018-11-12 Thread brahma
HDFS-14070. Refactor NameNodeWebHdfsMethods to allow better extensibility. 
Contributed by CR Hota

(cherry picked from commit e7b63baca1e10b28d8b4462fd80537b871951aa3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e866412
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e866412
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e866412

Branch: refs/heads/branch-3.2
Commit: 5e86641265ef3ef7e30a106fd4608849806fe3b3
Parents: 359b1dd
Author: Brahma Reddy Battula 
Authored: Tue Nov 13 12:45:13 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Nov 13 13:09:03 2018 +0530

--
 .../web/resources/NamenodeWebHdfsMethods.java   | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e866412/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index d73fd45..c4d3239 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -337,9 +337,22 @@ public class NamenodeWebHdfsMethods {
     throw new IOException("No active nodes contain this block");
   }
 
-  private Token generateDelegationToken(
-      final NameNode namenode, final UserGroupInformation ugi,
+  public long renewDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    return cp.renewDelegationToken(token);
+  }
+
+  public void cancelDelegationToken(Token token)
+      throws IOException {
+    ClientProtocol cp = getRpcClientProtocol();
+    cp.cancelDelegationToken(token);
+  }
+
+  public Token generateDelegationToken(
+      final UserGroupInformation ugi,
       final String renewer) throws IOException {
+    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     if (c == null) {
@@ -384,7 +397,7 @@
     } else {
       //generate a token
       final Token t = generateDelegationToken(
-          namenode, ugi, null);
+          ugi, null);
       delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
     }
 
@@ -705,7 +718,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      final long expiryTime = cp.renewDelegationToken(token);
+      final long expiryTime = renewDelegationToken(token);
       final String js = JsonUtil.toJsonString("long", expiryTime);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -714,7 +727,7 @@
       validateOpParams(op, delegationTokenArgument);
       final Token token = new Token();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
-      cp.cancelDelegationToken(token);
+      cancelDelegationToken(token);
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MODIFYACLENTRIES: {
@@ -1138,9 +1151,8 @@
       throw new IllegalArgumentException(delegation.getName()
           + " parameter is not null.");
     }
-    final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final Token token = generateDelegationToken(
-        namenode, ugi, renewer.getValue());
+        ugi, renewer.getValue());
 
     final String setServiceName = tokenService.getValue();
     final String setKind = tokenKind.getValue();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15912. start-build-env.sh still creates an invalid /etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. Contributed by Akira Ajisaka.

2018-11-12 Thread tasanuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e67e00724 -> 1959ca6f2


HADOOP-15912. start-build-env.sh still creates an invalid 
/etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. 
Contributed by Akira Ajisaka.

(cherry picked from commit a67642c3776156ee941f12f9481160c729c56027)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1959ca6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1959ca6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1959ca6f

Branch: refs/heads/branch-3.1
Commit: 1959ca6f231f9a1fdebba0c70f6e850b0ab4d635
Parents: e67e007
Author: Takanobu Asanuma 
Authored: Tue Nov 13 13:57:07 2018 +0900
Committer: Takanobu Asanuma 
Committed: Tue Nov 13 13:59:09 2018 +0900

--
 start-build-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1959ca6f/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index 6c8ddb33..284f889 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -61,7 +61,7 @@ docker build -t "hadoop-build-${USER_ID}" - 
< 
"/etc/sudoers.d/hadoop-build-${USER_ID}"
+RUN echo "${USER_NAME} ALL=NOPASSWD: ALL" > 
"/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
 
 UserSpecificDocker


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15912. start-build-env.sh still creates an invalid /etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. Contributed by Akira Ajisaka.

2018-11-12 Thread tasanuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 861105747 -> 359b1dd40


HADOOP-15912. start-build-env.sh still creates an invalid 
/etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. 
Contributed by Akira Ajisaka.

(cherry picked from commit a67642c3776156ee941f12f9481160c729c56027)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/359b1dd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/359b1dd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/359b1dd4

Branch: refs/heads/branch-3.2
Commit: 359b1dd40bc087fb10a538ea89578d5cc00d542f
Parents: 8611057
Author: Takanobu Asanuma 
Authored: Tue Nov 13 13:57:07 2018 +0900
Committer: Takanobu Asanuma 
Committed: Tue Nov 13 13:58:29 2018 +0900

--
 start-build-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/359b1dd4/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index bf6b411..e3d9f2f 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -61,7 +61,7 @@ docker build -t "hadoop-build-${USER_ID}" - 
< 
"/etc/sudoers.d/hadoop-build-${USER_ID}"
+RUN echo "${USER_NAME} ALL=NOPASSWD: ALL" > 
"/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
 
 UserSpecificDocker


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15912. start-build-env.sh still creates an invalid /etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. Contributed by Akira Ajisaka.

2018-11-12 Thread tasanuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 703b2860a -> a67642c37


HADOOP-15912. start-build-env.sh still creates an invalid 
/etc/sudoers.d/hadoop-build-${USER_ID} file entry after HADOOP-15802. 
Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a67642c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a67642c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a67642c3

Branch: refs/heads/trunk
Commit: a67642c3776156ee941f12f9481160c729c56027
Parents: 703b286
Author: Takanobu Asanuma 
Authored: Tue Nov 13 13:57:07 2018 +0900
Committer: Takanobu Asanuma 
Committed: Tue Nov 13 13:57:07 2018 +0900

--
 start-build-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67642c3/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index bf6b411..e3d9f2f 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -61,7 +61,7 @@ docker build -t "hadoop-build-${USER_ID}" - 
< 
"/etc/sudoers.d/hadoop-build-${USER_ID}"
+RUN echo "${USER_NAME} ALL=NOPASSWD: ALL" > 
"/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
 
 UserSpecificDocker


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.2 fec6c8a46 -> 826afbeae


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.

(cherry picked from commit 703b2860a49577629e7b3ef461d8a61292e79c88)
(cherry picked from commit a86b665340696bd83b6062025282dda40eaaa9fb)
(cherry picked from commit 5884b1c28f4bd9137e6eb04ab7d0ad080b55b50f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/826afbea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/826afbea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/826afbea

Branch: refs/heads/branch-2.9.2
Commit: 826afbeae31ca687bc2f8471dc841b66ed2c6704
Parents: fec6c8a
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:45:18 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/826afbea/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 39c9133..fbe3fb2 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -259,7 +259,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 31aafb886 -> e3593c09d


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.

(cherry picked from commit 703b2860a49577629e7b3ef461d8a61292e79c88)
(cherry picked from commit a86b665340696bd83b6062025282dda40eaaa9fb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3593c09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3593c09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3593c09

Branch: refs/heads/branch-2.8
Commit: e3593c09d429b786fc98de516fc520f622e957be
Parents: 31aafb8
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:44:49 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3593c09/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 8b138e5..09066a1 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -259,7 +259,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 7f81ebeb9 -> 5884b1c28


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.

(cherry picked from commit 703b2860a49577629e7b3ef461d8a61292e79c88)
(cherry picked from commit a86b665340696bd83b6062025282dda40eaaa9fb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5884b1c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5884b1c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5884b1c2

Branch: refs/heads/branch-2.9
Commit: 5884b1c28f4bd9137e6eb04ab7d0ad080b55b50f
Parents: 7f81ebe
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:43:05 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5884b1c2/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 39c9133..fbe3fb2 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -259,7 +259,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b7ca20e16 -> a86b66534


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.

(cherry picked from commit 703b2860a49577629e7b3ef461d8a61292e79c88)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a86b6653
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a86b6653
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a86b6653

Branch: refs/heads/branch-2
Commit: a86b665340696bd83b6062025282dda40eaaa9fb
Parents: b7ca20e
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:42:37 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a86b6653/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 39c9133..fbe3fb2 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -259,7 +259,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b6ef824e7 -> e67e00724


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.

(cherry picked from commit 703b2860a49577629e7b3ef461d8a61292e79c88)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e67e0072
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e67e0072
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e67e0072

Branch: refs/heads/branch-3.1
Commit: e67e00724ebfc52bd6084dbf2bfbc674c2c7fa6f
Parents: b6ef824
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:42:01 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e67e0072/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 949a46f..b0f01d7 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -253,7 +253,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 e6532f7eb -> 861105747


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.

(cherry picked from commit 703b2860a49577629e7b3ef461d8a61292e79c88)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86110574
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86110574
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86110574

Branch: refs/heads/branch-3.2
Commit: 861105747bdb8c1ade3369237c10d159cce34bf1
Parents: e6532f7
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:41:42 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86110574/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 6ec3503..c861654 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -253,7 +253,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15923. create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent.

2018-11-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk f8713f8ad -> 703b2860a


HADOOP-15923. create-release script should set max-cache-ttl as well as 
default-cache-ttl for gpg-agent.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/703b2860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/703b2860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/703b2860

Branch: refs/heads/trunk
Commit: 703b2860a49577629e7b3ef461d8a61292e79c88
Parents: f8713f8
Author: Akira Ajisaka 
Authored: Tue Nov 13 13:40:43 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 13 13:40:43 2018 +0900

--
 dev-support/bin/create-release | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/703b2860/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 6ec3503..c861654 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -253,7 +253,8 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 36000" > "${LOGDIR}/gpgagent.conf"
+  echo "max-cache-ttl 36000" >> "${LOGDIR}/gpgagent.conf"
   # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-831. TestOzoneShell in integration-test is flaky. Contributed by Nanda kumar.

2018-11-12 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4c465f553 -> f8713f8ad


HDDS-831. TestOzoneShell in integration-test is flaky. Contributed by Nanda 
kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8713f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8713f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8713f8a

Branch: refs/heads/trunk
Commit: f8713f8adea9d69330933a2cde594ed11ed9520c
Parents: 4c465f5
Author: Yiqun Lin 
Authored: Tue Nov 13 10:38:27 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Nov 13 10:38:27 2018 +0800

--
 .../test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8713f8a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 1900024..bd05b92 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -266,7 +266,7 @@ public class TestOzoneShell {
    */
   @Test
   public void testCreateVolumeWithoutUser() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(1);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
     String[] args = new String[] {"volume", "create", url + "/" + volumeName,
         "--root"};
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8761. Service AM support for decommissioning component instances. Contributed by Billie Rinaldi

2018-11-12 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk b6d4e19f3 -> 4c465f553


YARN-8761. Service AM support for decommissioning component instances.
   Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c465f55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c465f55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c465f55

Branch: refs/heads/trunk
Commit: 4c465f5535054dad2ef0b18128fb115129f6939e
Parents: b6d4e19
Author: Eric Yang 
Authored: Mon Nov 12 19:53:10 2018 -0500
Committer: Eric Yang 
Committed: Mon Nov 12 19:53:10 2018 -0500

--
 .../yarn/service/client/ApiServiceClient.java   |  28 
 .../hadoop/yarn/service/webapp/ApiServer.java   |  40 +
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   5 +
 .../hadoop/yarn/service/ClientAMProtocol.java   |   6 +
 .../hadoop/yarn/service/ClientAMService.java|  20 +++
 .../yarn/service/api/records/Component.java |  26 
 .../yarn/service/client/ServiceClient.java  |  60 +++-
 .../yarn/service/component/Component.java   |  64 +++-
 .../yarn/service/component/ComponentEvent.java  |  10 ++
 .../service/component/ComponentEventType.java   |   3 +-
 .../component/instance/ComponentInstance.java   |  18 +--
 .../pb/client/ClientAMProtocolPBClientImpl.java |  14 ++
 .../service/ClientAMProtocolPBServiceImpl.java  |  13 ++
 .../yarn/service/utils/ServiceApiUtil.java  |  56 ++-
 .../src/main/proto/ClientAMProtocol.proto   |  11 +-
 .../hadoop/yarn/service/ServiceTestUtils.java   |   2 +-
 .../TestComponentDecommissionInstances.java | 147 +++
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  26 +++-
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  15 +-
 .../hadoop/yarn/client/api/AppAdminClient.java  |  12 ++
 .../src/site/markdown/YarnCommands.md   |   3 +
 .../markdown/yarn-service/YarnServiceAPI.md |   1 +
 22 files changed, 548 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c465f55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
index 851acbd..38cfd11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -746,4 +746,32 @@ public class ApiServiceClient extends AppAdminClient {
     }
     return result;
   }
+
+  @Override
+  public int actionDecommissionInstances(String appName,
+      List componentInstances) throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      Service service = new Service();
+      service.setName(appName);
+      for (String instance : componentInstances) {
+        String componentName = ServiceApiUtil.parseComponentName(instance);
+        Component component = service.getComponent(componentName);
+        if (component == null) {
+          component = new Component();
+          component.setName(componentName);
+          service.addComponent(component);
+        }
+        component.addDecommissionedInstance(instance);
+      }
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to decommission instance: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
 }
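
A hedged usage sketch of driving the new decommission path from client code. The service and instance names are made up, and the "yarn-service" client type is assumed to select the ApiServiceClient implementation shown above:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.AppAdminClient;

public class DecommissionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    AppAdminClient client =
        AppAdminClient.createAppAdminClient("yarn-service", conf);
    // Instances are named COMPONENT-ID, e.g. the first "sleeper" instance
    // is sleeper-0; decommission two of them.
    int exitCode = client.actionDecommissionInstances("sleeper",
        Arrays.asList("sleeper-0", "sleeper-3"));
    System.exit(exitCode);
  }
}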

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c465f55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 

[25/50] [abbrv] hadoop git commit: HDDS-826. Update Ratis to 0.3.0-6f3419a-SNAPSHOT.

2018-11-12 Thread botong
HDDS-826. Update Ratis to 0.3.0-6f3419a-SNAPSHOT.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/298d2502
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/298d2502
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/298d2502

Branch: refs/heads/YARN-7402
Commit: 298d2502b0255270c829225373a456a5560aac73
Parents: 9fe50b4
Author: Tsz Wo Nicholas Sze 
Authored: Fri Nov 9 18:35:40 2018 -0800
Committer: Tsz Wo Nicholas Sze 
Committed: Fri Nov 9 18:35:40 2018 -0800

--
 hadoop-hdds/pom.xml  | 2 +-
 hadoop-ozone/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d2502/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 7a1704c..a6b0d84 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -45,7 +45,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.3.0-1d07b18-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-6f3419a-SNAPSHOT</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d2502/hadoop-ozone/pom.xml
--
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 671421e..5bd64a8 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -33,7 +33,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hadoop.version>3.2.1-SNAPSHOT</hadoop.version>
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
     <ozone.version>0.4.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.3.0-1d07b18-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-6f3419a-SNAPSHOT</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Badlands</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>





[42/50] [abbrv] hadoop git commit: YARN-7953. [GQ] Data structures for federation global queues calculations. Contributed by Abhishek Modi.

2018-11-12 Thread botong
YARN-7953. [GQ] Data structures for federation global queues calculations. 
Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e8686b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e8686b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e8686b4

Branch: refs/heads/YARN-7402
Commit: 1e8686b448c8e1400bb511e940ed4ebc6a38a7e1
Parents: b78f4e5
Author: Botong Huang 
Authored: Thu Aug 16 08:28:35 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../pom.xml |   3 +
 ...ederationGlobalQueueValidationException.java |  28 +
 .../globalqueues/FederationGlobalView.java  | 198 +
 .../globalqueues/FederationQueue.java   | 761 +++
 .../globalqueues/package-info.java  |  17 +
 .../globalqueues/GlobalQueueTestUtil.java   | 133 
 .../globalqueues/TestFederationQueue.java   |  98 +++
 .../resources/globalqueues/basic-queue.json |   9 +
 .../globalqueues/tree-queue-adaptable.json  |  96 +++
 .../test/resources/globalqueues/tree-queue.json | 128 
 10 files changed, 1471 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e8686b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index c137c9e..f0097af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -108,6 +108,9 @@
         <excludes>
           <exclude>src/test/resources/schedulerInfo1.json</exclude>
           <exclude>src/test/resources/schedulerInfo2.json</exclude>
+          <exclude>src/test/resources/globalqueues/basic-queue.json</exclude>
+          <exclude>src/test/resources/globalqueues/tree-queue.json</exclude>
+          <exclude>src/test/resources/globalqueues/tree-queue-adaptable.json</exclude>
         </excludes>
       </configuration>
     </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e8686b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/globalqueues/FederationGlobalQueueValidationException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/globalqueues/FederationGlobalQueueValidationException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/globalqueues/FederationGlobalQueueValidationException.java
new file mode 100644
index 000..3a18763
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/globalqueues/FederationGlobalQueueValidationException.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.globalqueues;
+
+/**
+ * Exception thrown when FederationQueue is not valid.
+ */
+public class FederationGlobalQueueValidationException extends Exception {
+
+  public FederationGlobalQueueValidationException(String s) {
+super(s);
+  }
+}

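For orientation, a hypothetical use of the new exception type; the invariant
and field names are illustrative, not the actual FederationQueue validation
logic.

  // Reject an inconsistent queue snapshot before global calculations run.
  void validateCapacities(String queueName, long guaranteed, long total)
      throws FederationGlobalQueueValidationException {
    if (guaranteed > total) {
      throw new FederationGlobalQueueValidationException(
          "guaranteed capacity " + guaranteed + " exceeds total capacity "
              + total + " for queue " + queueName);
    }
  }
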
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e8686b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/globalqueues/FederationGlobalView.java
--
diff 

[13/50] [abbrv] hadoop git commit: HDDS-802. Container State Manager should get open pipelines for allocating container. Contributed by Lokesh Jain.

2018-11-12 Thread botong
HDDS-802. Container State Manager should get open pipelines for allocating 
container. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9317a61f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9317a61f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9317a61f

Branch: refs/heads/YARN-7402
Commit: 9317a61f3cdc5ca91c6934eec9898cee3d65441a
Parents: c80f753
Author: Yiqun Lin 
Authored: Thu Nov 8 23:41:43 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Nov 8 23:41:43 2018 +0800

--
 .../scm/container/ContainerStateManager.java|  4 +-
 .../hdds/scm/pipeline/PipelineManager.java  |  3 +
 .../hdds/scm/pipeline/PipelineStateManager.java |  5 ++
 .../hdds/scm/pipeline/PipelineStateMap.java | 22 +++
 .../hdds/scm/pipeline/SCMPipelineManager.java   | 11 
 .../scm/pipeline/TestPipelineStateManager.java  | 61 ++--
 6 files changed, 100 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 87505c3..74c8dcb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -248,8 +248,8 @@ public class ContainerStateManager {
 try {
   pipeline = pipelineManager.createPipeline(type, replicationFactor);
 } catch (IOException e) {
-  final List<Pipeline> pipelines =
-  pipelineManager.getPipelines(type, replicationFactor);
+  final List<Pipeline> pipelines = pipelineManager
+  .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN);
   if (pipelines.isEmpty()) {
 throw new IOException("Could not allocate container");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index 04ec535..cce09f3 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -46,6 +46,9 @@ public interface PipelineManager extends Closeable {
   List<Pipeline> getPipelines(ReplicationType type,
   ReplicationFactor factor);
 
+  List<Pipeline> getPipelines(ReplicationType type,
+  ReplicationFactor factor, Pipeline.PipelineState state);
+
   void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)
   throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 67f74d3..9f95378 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -64,6 +64,11 @@ class PipelineStateManager {
 return pipelineStateMap.getPipelines(type, factor);
   }
 
+  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
+  PipelineState state) {
+return pipelineStateMap.getPipelines(type, factor, state);
+  }
+
   List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
 return pipelineStateMap.getPipelines(type, states);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9317a61f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
index 7b69491..85790b2 100644
--- 

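The PipelineStateMap hunk is cut short above; as a minimal sketch, the new
state-aware overload plausibly filters the tracked pipelines by type, factor
and state, mirroring the existing overloads (the map field and method body
here are assumptions, not the literal patch):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

class PipelineLookupSketch {
  private final Map<PipelineID, Pipeline> pipelineMap = new HashMap<>();

  // Only pipelines in the requested state (e.g. OPEN) are returned, which is
  // what lets ContainerStateManager skip closed pipelines during allocation.
  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
      PipelineState state) {
    return pipelineMap.values().stream()
        .filter(p -> p.getType() == type
            && p.getFactor() == factor
            && p.getPipelineState() == state)
        .collect(Collectors.toList());
  }
}
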
[41/50] [abbrv] hadoop git commit: YARN-7599. [GPG] ApplicationCleaner in Global Policy Generator. Contributed by Botong Huang.

2018-11-12 Thread botong
YARN-7599. [GPG] ApplicationCleaner in Global Policy Generator. Contributed by 
Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1facc8a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1facc8a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1facc8a2

Branch: refs/heads/YARN-7402
Commit: 1facc8a28416efb7424cec101c80e60ac76f25ac
Parents: 1e8686b
Author: Botong Huang 
Authored: Fri Sep 21 17:30:44 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  25 +++
 .../src/main/resources/yarn-default.xml |  28 
 .../store/impl/MemoryFederationStateStore.java  |   2 -
 .../pb/ApplicationHomeSubClusterPBImpl.java |   3 +
 .../utils/FederationStateStoreFacade.java   |  33 
 .../server/globalpolicygenerator/GPGUtils.java  |  21 ++-
 .../GlobalPolicyGenerator.java  |  23 ++-
 .../applicationcleaner/ApplicationCleaner.java  | 154 +++
 .../DefaultApplicationCleaner.java  |  82 ++
 .../applicationcleaner/package-info.java|  19 +++
 .../TestDefaultApplicationCleaner.java  | 130 
 11 files changed, 513 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1facc8a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c42ebb1..aa990d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3445,6 +3445,31 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
   public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180;
 
+  // The application cleaner class to use
+  public static final String GPG_APPCLEANER_CLASS =
+  FEDERATION_GPG_PREFIX + "application.cleaner.class";
+  public static final String DEFAULT_GPG_APPCLEANER_CLASS =
+  "org.apache.hadoop.yarn.server.globalpolicygenerator"
+  + ".applicationcleaner.DefaultApplicationCleaner";
+
+  // The interval at which the application cleaner runs, -1 means disabled
+  public static final String GPG_APPCLEANER_INTERVAL_MS =
+  FEDERATION_GPG_PREFIX + "application.cleaner.interval-ms";
+  public static final long DEFAULT_GPG_APPCLEANER_INTERVAL_MS = -1;
+
+  /**
+   * Specifications on how (many times) to contact Router for apps. We need to
+   * do this because Router might return partial application list because some
+   * sub-cluster RM is not responsive (e.g. failing over).
+   *
+   * Should have three values separated by comma: minimal success retries,
+   * maximum total retry, retry interval (ms).
+   */
+  public static final String GPG_APPCLEANER_CONTACT_ROUTER_SPEC =
+  FEDERATION_GPG_PREFIX + "application.cleaner.contact.router.spec";
+  public static final String DEFAULT_GPG_APPCLEANER_CONTACT_ROUTER_SPEC =
+  "3,10,60";
+
   public static final String FEDERATION_GPG_POLICY_PREFIX =
   FEDERATION_GPG_PREFIX + "policy.generator.";
 

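A sketch of decoding a three-part spec such as "3,10,60" (class, method and
variable names are illustrative; the real parsing lives in ApplicationCleaner):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

final class ContactRouterSpec {
  // Returns {minSuccessRetries, maxTotalRetry, retryIntervalMs}.
  static long[] parse(Configuration conf) {
    String spec = conf.get(
        YarnConfiguration.GPG_APPCLEANER_CONTACT_ROUTER_SPEC,
        YarnConfiguration.DEFAULT_GPG_APPCLEANER_CONTACT_ROUTER_SPEC);
    String[] parts = spec.split(",");
    return new long[] {
        Long.parseLong(parts[0]),  // minimal success retries
        Long.parseLong(parts[1]),  // maximum total retry
        Long.parseLong(parts[2])   // retry interval (ms)
    };
  }
}
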
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1facc8a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a08ff96..e496e28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3671,6 +3671,34 @@
 
   
 
+  <property>
+    <description>
+      The Application Cleaner implementation class for GPG to use.
+    </description>
+    <name>yarn.federation.gpg.application.cleaner.class</name>
+    <value>org.apache.hadoop.yarn.server.globalpolicygenerator.applicationcleaner.DefaultApplicationCleaner</value>
+  </property>
+
+  <property>
+    <description>
+      The interval at which the application cleaner runs, -1 means disabled.
+    </description>
+    <name>yarn.federation.gpg.application.cleaner.interval-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>
+      Specifications on how (many times) to contact Router for apps. We need to
+      do this because Router might return partial application 

[10/50] [abbrv] hadoop git commit: YARN-8988. Reduce the verbose log on RM heartbeat path when distributed node-attributes is enabled. Contributed by Tao Yang.

2018-11-12 Thread botong
YARN-8988. Reduce the verbose log on RM heartbeat path when distributed 
node-attributes is enabled. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1bbf7dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1bbf7dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1bbf7dc

Branch: refs/heads/YARN-7402
Commit: e1bbf7dcdfc30a61a2b10bef09c59ff17d290488
Parents: f8c72d7
Author: Weiwei Yang 
Authored: Thu Nov 8 17:47:18 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Nov 8 17:47:18 2018 +0800

--
 .../resourcemanager/nodelabels/NodeAttributesManagerImpl.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1bbf7dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
index e524788..83c5983 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java
@@ -221,8 +221,10 @@ public class NodeAttributesManagerImpl extends 
NodeAttributesManager {
 
   // Notify RM
   if (rmContext != null && rmContext.getDispatcher() != null) {
-LOG.info("Updated NodeAttribute event to RM:" + newNodeToAttributesMap
-.values());
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Updated NodeAttribute event to RM:"
+  + newNodeToAttributesMap.values());
+}
 rmContext.getDispatcher().getEventHandler().handle(
 new NodeAttributesUpdateSchedulerEvent(newNodeToAttributesMap));
   }


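A side note on the fix: with SLF4J, parameterized logging defers message
formatting until DEBUG is actually enabled, so it carries the same intent
without an explicit guard; the guard used above goes one step further and
avoids evaluating the argument expression entirely.

  // Equivalent parameterized form (sketch):
  LOG.debug("Updated NodeAttribute event to RM: {}",
      newNodeToAttributesMap.values());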



[44/50] [abbrv] hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.

2018-11-12 Thread botong
YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by 
Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aec1a5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aec1a5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aec1a5d

Branch: refs/heads/YARN-7402
Commit: 8aec1a5ddd4aa0ac1321fad05e3503dda3c5a14b
Parents: b97ad37
Author: Botong Huang 
Authored: Wed May 23 12:45:32 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../server/globalpolicygenerator/GPGUtils.java  | 31 +---
 1 file changed, 20 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aec1a5d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
index 429bec4..31cee1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -18,21 +18,22 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import static javax.servlet.http.HttpServletResponse.SC_OK;
+
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
  * GPGUtils contains utility functions for the GPG.
@@ -53,15 +54,23 @@ public final class GPGUtils {
 T obj = null;
 
 WebResource webResource = client.resource(webAddr);
-ClientResponse response = webResource.path("ws/v1/cluster").path(path)
-.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-if (response.getStatus() == HttpServletResponse.SC_OK) {
-  obj = response.getEntity(returnType);
-} else {
-  throw new YarnRuntimeException("Bad response from remote web service: "
-  + response.getStatus());
+ClientResponse response = null;
+try {
+  response = webResource.path("ws/v1/cluster").path(path)
+  .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+  if (response.getStatus() == SC_OK) {
+obj = response.getEntity(returnType);
+  } else {
+throw new YarnRuntimeException(
+"Bad response from remote web service: " + response.getStatus());
+  }
+  return obj;
+} finally {
+  if (response != null) {
+response.close();
+  }
+  client.destroy();
 }
-return obj;
   }
 
   /**


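The cleanup pattern generalizes; as a sketch, a reusable helper built from the
same Jersey 1.x calls used in this diff (the method name is hypothetical):

  static <T> T getFromClusterWebService(String webAddr, String path,
      Class<T> returnType) {
    Client client = Client.create();
    ClientResponse response = null;
    try {
      response = client.resource(webAddr).path("ws/v1/cluster").path(path)
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      if (response.getStatus() != SC_OK) {
        throw new YarnRuntimeException(
            "Bad response from remote web service: " + response.getStatus());
      }
      return response.getEntity(returnType);
    } finally {
      // Close the response before destroying the client so the underlying
      // connection is released even when an exception is in flight.
      if (response != null) {
        response.close();
      }
      client.destroy();
    }
  }
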



[29/50] [abbrv] hadoop git commit: YARN-8902. [CSI] Add volume manager that manages CSI volume lifecycle. Contributed by Weiwei Yang.

2018-11-12 Thread botong
YARN-8902. [CSI] Add volume manager that manages CSI volume lifecycle. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e728444
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e728444
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e728444

Branch: refs/heads/YARN-7402
Commit: 4e7284443e6cf8dac3cd5d2581730c87ae6ffb55
Parents: b5ec85d
Author: Sunil G 
Authored: Mon Nov 12 11:57:02 2018 +0530
Committer: Sunil G 
Committed: Mon Nov 12 11:57:02 2018 +0530

--
 .../volume/csi/CsiAdaptorClientProtocol.java|  34 +++
 .../yarn/server/volume/csi/CsiConstants.java|  37 +++
 .../volume/csi/VolumeCapabilityRange.java   | 107 
 .../hadoop/yarn/server/volume/csi/VolumeId.java |  59 +
 .../yarn/server/volume/csi/VolumeMetaData.java  | 227 +
 .../csi/exception/InvalidVolumeException.java   |  28 +++
 .../volume/csi/exception/VolumeException.java   |  34 +++
 .../exception/VolumeProvisioningException.java  |  32 +++
 .../volume/csi/exception/package-info.java  |  27 ++
 .../yarn/server/volume/csi/package-info.java|  27 ++
 .../resourcemanager/RMActiveServiceContext.java |  14 ++
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 +
 .../server/resourcemanager/ResourceManager.java |  14 ++
 .../volume/csi/CsiAdaptorClient.java|  36 +++
 .../volume/csi/VolumeBuilder.java   | 106 
 .../volume/csi/VolumeManager.java   |  63 +
 .../volume/csi/VolumeManagerImpl.java   | 108 
 .../volume/csi/VolumeStates.java|  60 +
 .../csi/event/ControllerPublishVolumeEvent.java |  30 +++
 .../volume/csi/event/ValidateVolumeEvent.java   |  30 +++
 .../volume/csi/event/VolumeEvent.java   |  43 
 .../volume/csi/event/VolumeEventType.java   |  29 +++
 .../volume/csi/event/package-info.java  |  27 ++
 .../volume/csi/lifecycle/Volume.java|  37 +++
 .../volume/csi/lifecycle/VolumeImpl.java| 199 +++
 .../volume/csi/lifecycle/VolumeState.java   |  35 +++
 .../volume/csi/lifecycle/package-info.java  |  27 ++
 .../volume/csi/package-info.java|  27 ++
 .../csi/processor/VolumeAMSProcessor.java   | 158 
 .../volume/csi/processor/package-info.java  |  27 ++
 .../csi/provisioner/VolumeProvisioner.java  |  32 +++
 .../provisioner/VolumeProvisioningResults.java  |  87 +++
 .../csi/provisioner/VolumeProvisioningTask.java |  66 +
 .../volume/csi/provisioner/package-info.java|  27 ++
 .../resourcemanager/volume/package-info.java|  27 ++
 .../volume/csi/TestVolumeCapabilityRange.java   |  67 +
 .../volume/csi/TestVolumeLifecycle.java | 161 
 .../volume/csi/TestVolumeMetaData.java  | 178 +
 .../volume/csi/TestVolumeProcessor.java | 250 +++
 40 files changed, 2594 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e728444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
new file mode 100644
index 000..b894d4e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/CsiAdaptorClientProtocol.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.volume.csi;
+
+import 

[06/50] [abbrv] hadoop git commit: YARN-8866. Fix a parsing error for crossdomain.xml.

2018-11-12 Thread botong
YARN-8866. Fix a parsing error for crossdomain.xml.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dc1f6db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dc1f6db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dc1f6db

Branch: refs/heads/YARN-7402
Commit: 8dc1f6dbf712a65390a9a6859f62fec0481af31b
Parents: e6444f1
Author: Takanobu Asanuma 
Authored: Wed Nov 7 18:26:07 2018 +0900
Committer: Takanobu Asanuma 
Committed: Wed Nov 7 18:26:07 2018 +0900

--
 .../hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dc1f6db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
index 43a2ea6..a9597e9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/crossdomain.xml
@@ -18,7 +18,12 @@
 * limitations under the License.
 -->
 
-<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">
+
+  
+  
+  ]>
 
   
 





[14/50] [abbrv] hadoop git commit: HADOOP-15846. ABFS: fix mask related bugs in setAcl, modifyAclEntries and removeAclEntries.

2018-11-12 Thread botong
HADOOP-15846. ABFS: fix mask related bugs in setAcl, modifyAclEntries and 
removeAclEntries.

Contributed by Junhua Gu.

(cherry picked from commit 66715005f9e8f4f25faa352a06d142b75a029f0e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/724c1500
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/724c1500
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/724c1500

Branch: refs/heads/YARN-7402
Commit: 724c15007beb9ca36f2f37c829f1daa366d95bbf
Parents: 9317a61
Author: Junhua Gu 
Authored: Thu Nov 8 17:21:40 2018 +
Committer: Steve Loughran 
Committed: Thu Nov 8 17:21:40 2018 +

--
 .../fs/azurebfs/AzureBlobFileSystemStore.java   |  20 +--
 .../fs/azurebfs/services/AbfsAclHelper.java |  89 +--
 .../azurebfs/ITestAzureBlobFilesystemAcl.java   | 147 ++-
 3 files changed, 225 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/724c1500/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 1ac1761..bfdbba8 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -626,17 +626,7 @@ public class AzureBlobFileSystemStore {
 
 final Map<String, String> aclEntries = 
AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL));
 
-for (Map.Entry<String, String> modifyAclEntry : 
modifyAclEntries.entrySet()) {
-  aclEntries.put(modifyAclEntry.getKey(), modifyAclEntry.getValue());
-}
-
-if (!modifyAclEntries.containsKey(AbfsHttpConstants.ACCESS_MASK)) {
-  aclEntries.remove(AbfsHttpConstants.ACCESS_MASK);
-}
-
-if (!modifyAclEntries.containsKey(AbfsHttpConstants.DEFAULT_MASK)) {
-  aclEntries.remove(AbfsHttpConstants.DEFAULT_MASK);
-}
+AbfsAclHelper.modifyAclEntriesInternal(aclEntries, modifyAclEntries);
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, 
true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);
@@ -736,12 +726,8 @@ public class AzureBlobFileSystemStore {
 final String eTag = 
op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG);
 
 final Map<String, String> getAclEntries = 
AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL));
-for (Map.Entry<String, String> ace : getAclEntries.entrySet()) {
-  if (ace.getKey().startsWith("default:") && (ace.getKey() != 
AbfsHttpConstants.DEFAULT_MASK)
-  && !aclEntries.containsKey(ace.getKey())) {
-aclEntries.put(ace.getKey(), ace.getValue());
-  }
-}
+
+AbfsAclHelper.setAclEntriesInternal(aclEntries, getAclEntries);
 
 client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, 
true),
 AbfsAclHelper.serializeAclSpec(aclEntries), eTag);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/724c1500/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
index c28da2c..34959a6 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsAclHelper.java
@@ -44,12 +44,17 @@ public final class AbfsAclHelper {
 // not called
   }
 
-  public static Map<String, String> deserializeAclSpec(final String 
aclSpecString) {
+  public static Map<String, String> deserializeAclSpec(final String 
aclSpecString) throws AzureBlobFileSystemException {
 final Map<String, String> aclEntries  = new HashMap<>();
-final String[] aclArray = aclSpecString.split(AbfsHttpConstants.COMMA);
-for (String acl : aclArray) {
-  int idx = acl.lastIndexOf(AbfsHttpConstants.COLON);
-  aclEntries.put(acl.substring(0, idx), acl.substring(idx + 1));
+final String[] aceArray = aclSpecString.split(AbfsHttpConstants.COMMA);
+for (String ace : aceArray) {
+  int idx = ace.lastIndexOf(AbfsHttpConstants.COLON);
+  final String key = ace.substring(0, idx);
+  final String val = ace.substring(idx + 1);
+  if (aclEntries.containsKey(key)) {
+throw new 

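For orientation, a round-trip sketch of the comma/colon ACL-spec format the
helper parses; the spec string is illustrative. Keys keep everything up to the
last colon ("user:", "default:user:foo", ...), and with this patch a duplicate
key now raises an exception instead of silently overwriting the earlier entry.

  void roundTrip() throws AzureBlobFileSystemException {
    Map<String, String> entries = AbfsAclHelper.deserializeAclSpec(
        "user::rwx,group::r-x,other::---,mask::rwx");
    // entries: {"user:"="rwx", "group:"="r-x", "other:"="---", "mask:"="rwx"}
    String spec = AbfsAclHelper.serializeAclSpec(entries);
  }
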
[50/50] [abbrv] hadoop git commit: fix version after rebase

2018-11-12 Thread botong
fix version after rebase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1017a67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1017a67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1017a67

Branch: refs/heads/YARN-7402
Commit: e1017a676b93aa03f2c8821117c711c30aec03c0
Parents: 727c3f8
Author: Botong Huang 
Authored: Mon Nov 12 15:13:42 2018 -0800
Committer: Botong Huang 
Committed: Mon Nov 12 15:13:42 2018 -0800

--
 .../hadoop-yarn-server-globalpolicygenerator/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1017a67/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index f0097af..b044b88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.2.0-SNAPSHOT</version>
+    <version>3.3.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
-  <version>3.2.0-SNAPSHOT</version>
+  <version>3.3.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-globalpolicygenerator</name>
 
   





[20/50] [abbrv] hadoop git commit: HDDS-823. OzoneRestClient is failing with NPE on getKeyDetails call. Contributed by Bharat Viswanadham.

2018-11-12 Thread botong
HDDS-823. OzoneRestClient is failing with NPE on getKeyDetails call. 
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47194fef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47194fef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47194fef

Branch: refs/heads/YARN-7402
Commit: 47194fefdd7a225399342af1960cbd219a9b4763
Parents: 66bf624
Author: Bharat Viswanadham 
Authored: Thu Nov 8 21:28:04 2018 -0800
Committer: Bharat Viswanadham 
Committed: Thu Nov 8 21:28:04 2018 -0800

--
 .../hadoop/ozone/web/storage/DistributedStorageHandler.java | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47194fef/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
--
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index 88f2d6e..a8df114 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.web.storage;
 
 import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -486,6 +487,7 @@ public final class DistributedStorageHandler implements 
StorageHandler {
 HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime()));
 keyInfo.setModifiedOn(
 HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime()));
+keyInfo.setType(ReplicationType.valueOf(omKeyInfo.getType().toString()));
 return keyInfo;
   }
 
@@ -510,6 +512,8 @@ public final class DistributedStorageHandler implements 
StorageHandler {
 keyInfoDetails.setModifiedOn(
 HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime()));
 keyInfoDetails.setKeyLocations(keyLocations);
+keyInfoDetails.setType(ReplicationType.valueOf(omKeyInfo.getType()
+.toString()));
 return keyInfoDetails;
   }
 
@@ -553,6 +557,7 @@ public final class DistributedStorageHandler implements 
StorageHandler {
 HddsClientUtils.formatDateTime(info.getCreationTime()));
 tempInfo.setModifiedOn(
 HddsClientUtils.formatDateTime(info.getModificationTime()));
+tempInfo.setType(ReplicationType.valueOf(info.getType().toString()));
 
 result.addKey(tempInfo);
   }





[07/50] [abbrv] hadoop git commit: MAPREDUCE-7148. Fast fail jobs when exceeds dfs quota limitation. Contributed by Wang Yan

2018-11-12 Thread botong
MAPREDUCE-7148. Fast fail jobs when exceeds dfs quota limitation. Contributed 
by Wang Yan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b6625a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b6625a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b6625a9

Branch: refs/heads/YARN-7402
Commit: 0b6625a9735f76ab473b41d8ab9b7f3c7678cfff
Parents: 8dc1f6d
Author: Jason Lowe 
Authored: Wed Nov 7 08:20:49 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 7 08:20:49 2018 -0600

--
 ...ClusterStorageCapacityExceededException.java |  51 
 .../hdfs/protocol/QuotaExceededException.java   |   5 +-
 .../org/apache/hadoop/mapred/YarnChild.java |  28 -
 .../org/apache/hadoop/mapred/TestYarnChild.java | 118 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   3 +
 .../src/main/resources/mapred-default.xml   |   9 ++
 6 files changed, 209 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6625a9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
new file mode 100644
index 000..bbbf073
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Exception raised by HDFS indicating that storage capacity in the
+ * cluster filesystem is exceeded. See also
+ * https://issues.apache.org/jira/browse/MAPREDUCE-7148.
+ */
+@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+@InterfaceStability.Evolving
+public class ClusterStorageCapacityExceededException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public ClusterStorageCapacityExceededException() {
+super();
+  }
+
+  public ClusterStorageCapacityExceededException(String message) {
+super(message);
+  }
+
+  public ClusterStorageCapacityExceededException(String message,
+  Throwable cause) {
+super(message, cause);
+  }
+
+  public ClusterStorageCapacityExceededException(Throwable cause) {
+super(cause);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b6625a9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
index f4e7f34..7033f3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
@@ -18,10 +18,9 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;
 
 /**
  * This exception is thrown when modification to HDFS results in violation
@@ -37,7 +36,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class QuotaExceededException extends 

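The QuotaExceededException hunk is cut short above; the fast-fail logic hinges
on spotting a capacity exception anywhere in a cause chain, since HDFS may
wrap the quota violation in other IOExceptions before it reaches the task. A
minimal sketch of that check (the actual wiring in YarnChild, gated by the new
mapred-default knob, is outside this excerpt):

  private static boolean isCausedByCapacityExceeded(Throwable t) {
    while (t != null) {
      if (t instanceof ClusterStorageCapacityExceededException) {
        return true;
      }
      t = t.getCause();
    }
    return false;
  }
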
[23/50] [abbrv] hadoop git commit: HDDS-733. Create container if not exist, as part of chunk write. Contributed by Lokesh Jain.

2018-11-12 Thread botong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index bde3bc9..7d002c3 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -616,4 +616,20 @@ public final class ContainerTestHelper {
 }
 return false;
   }
+
+  public static boolean isContainerPresent(MiniOzoneCluster cluster,
+  long containerID, DatanodeDetails datanode) {
+for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) {
+  if (datanode.equals(datanodeService.getDatanodeDetails())) {
+Container container =
+datanodeService.getDatanodeStateMachine().getContainer()
+.getContainerSet().getContainer(containerID);
+if (container != null) {
+  return true;
+}
+  }
+}
+return false;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
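Usage sketch for the new helper in a test; cluster, containerID and
datanodeDetails are assumed to come from the surrounding test setup:

  // After writing a chunk to a block on a brand-new container, the container
  // should have been created on demand on the target datanode.
  Assert.assertTrue(ContainerTestHelper.isContainerPresent(
      cluster, containerID, datanodeDetails));
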
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 733ed85..98a27bf 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -171,7 +171,6 @@ public class KeyManagerImpl implements KeyManager {
 }
 OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
 .setBlockID(new BlockID(allocatedBlock.getBlockID()))
-.setShouldCreateContainer(allocatedBlock.getCreateContainer())
 .setLength(scmBlockSize)
 .setOffset(0)
 .build();
@@ -235,7 +234,6 @@ public class KeyManagerImpl implements KeyManager {
 }
 OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder()
 .setBlockID(new BlockID(allocatedBlock.getBlockID()))
-.setShouldCreateContainer(allocatedBlock.getCreateContainer())
 .setLength(allocateSize)
 .setOffset(0)
 .build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe50b49/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index 2076ced..5f8e939 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -122,8 +122,7 @@ public class ScmBlockLocationTestIngClient implements 
ScmBlockLocationProtocol {
 AllocatedBlock.Builder abb =
 new AllocatedBlock.Builder()
 .setContainerBlockID(new ContainerBlockID(containerID, localID))
-.setPipeline(pipeline)
-.setShouldCreateContainer(false);
+.setPipeline(pipeline);
 return abb.build();
   }
 





[36/50] [abbrv] hadoop git commit: YARN-8776. Implement Container Exec feature in LinuxContainerExecutor. Contributed by Eric Yang

2018-11-12 Thread botong
YARN-8776. Implement Container Exec feature in LinuxContainerExecutor. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f9c4f32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f9c4f32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f9c4f32

Branch: refs/heads/YARN-7402
Commit: 1f9c4f32e842529be5980e395587f135452372bb
Parents: 18fe65d
Author: Billie Rinaldi 
Authored: Mon Nov 12 10:41:45 2018 -0800
Committer: Billie Rinaldi 
Committed: Mon Nov 12 10:42:30 2018 -0800

--
 .../server/nodemanager/ContainerExecutor.java   |  2 +-
 .../nodemanager/LinuxContainerExecutor.java | 31 +-
 .../linux/privileged/PrivilegedOperation.java   |  1 +
 .../privileged/PrivilegedOperationExecutor.java | 57 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |  8 +++
 .../DelegatingLinuxContainerRuntime.java| 10 
 .../runtime/DockerLinuxContainerRuntime.java| 45 ++
 .../linux/runtime/docker/DockerExecCommand.java | 62 
 .../runtime/ContainerRuntime.java   | 14 -
 .../executor/ContainerExecContext.java  | 11 ++--
 .../webapp/ContainerShellWebSocket.java | 49 
 .../server/nodemanager/webapp/WebServer.java|  1 +
 .../impl/container-executor.c   |  9 ++-
 .../nodemanager/TestContainerExecutor.java  |  3 +-
 .../nodemanager/TestLinuxContainerExecutor.java |  4 +-
 .../runtime/MockLinuxContainerRuntime.java  |  9 +++
 .../TestContainersMonitorResourceChange.java|  2 +
 17 files changed, 275 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9c4f32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 6024dbf..77b7859 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -38,7 +38,6 @@ import 
java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,6 +58,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext;
 import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9c4f32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 0282f58..db2fed9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -62,15 +62,10 @@ import 

[15/50] [abbrv] hadoop git commit: HDFS-14039. ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6, 3). Contributed by Kitti Nanasi.

2018-11-12 Thread botong
HDFS-14039. ec -listPolicies doesn't show correct state for the default policy 
when the default is not RS(6,3). Contributed by Kitti Nanasi.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d99648c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d99648c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d99648c

Branch: refs/heads/YARN-7402
Commit: 8d99648c203004045a9339ad27258092969145d6
Parents: 724c150
Author: Kitti Nanasi 
Authored: Thu Nov 8 10:00:09 2018 -0800
Committer: Xiao Chen 
Committed: Thu Nov 8 10:01:19 2018 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 119 ++-
 .../server/namenode/FSImageFormatProtobuf.java  |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/namenode/TestEnabledECPolicies.java  | 103 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  42 +--
 .../server/namenode/TestNamenodeRetryCache.java |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   2 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   2 +-
 8 files changed, 231 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d99648c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index d2bf3af..57fa958 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -81,6 +82,15 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyInfo[] allPolicies;
 
   /**
+   * All policies in the state as it will be persisted in the fsimage.
+   *
+   * The difference between persisted policies and all policies is that
+   * if a default policy is only enabled at startup,
+   * it will appear as disabled in the persisted policy list and in the 
fsimage.
+   */
+  private Map<Byte, ErasureCodingPolicyInfo> allPersistedPolicies;
+
+  /**
* All enabled policies sorted by name for fast querying, including built-in
* policy, user defined policy.
*/
@@ -90,6 +100,7 @@ public final class ErasureCodingPolicyManager {
*/
   private ErasureCodingPolicy[] enabledPolicies;
 
+  private String defaultPolicyName;
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -102,14 +113,11 @@ public final class ErasureCodingPolicyManager {
 
   private ErasureCodingPolicyManager() {}
 
-  public void init(Configuration conf) {
-// Load erasure coding default policy
-final String defaultPolicyName = conf.getTrimmed(
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+  public void init(Configuration conf) throws IOException {
 this.policiesByName = new TreeMap<>();
 this.policiesByID = new TreeMap<>();
 this.enabledPoliciesByName = new TreeMap<>();
+this.allPersistedPolicies = new TreeMap<>();
 
 /**
  * TODO: load user defined EC policy from fsImage HDFS-7859
@@ -125,31 +133,12 @@ public final class ErasureCodingPolicyManager {
   final ErasureCodingPolicyInfo info = new ErasureCodingPolicyInfo(policy);
   policiesByName.put(policy.getName(), info);
   policiesByID.put(policy.getId(), info);
+  allPersistedPolicies.put(policy.getId(),
+  new ErasureCodingPolicyInfo(policy));
 }
 
-if (!defaultPolicyName.isEmpty()) {
-  final ErasureCodingPolicyInfo info =
-  policiesByName.get(defaultPolicyName);
-  if (info == null) {
-String names = policiesByName.values()
-.stream().map((pi) -> pi.getPolicy().getName())
-.collect(Collectors.joining(", "));
-String msg = String.format("EC policy '%s' specified at %s is not a "
-+ "valid policy. Please choose from list of available "
-+ "policies: [%s]",
-defaultPolicyName,
-DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
-names);
-throw new HadoopIllegalArgumentException(msg);
-  }
-  

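The rest of this hunk is cut short; in outline, the fix keeps two views of
each policy: the live view, where a policy configured as the default is
ENABLED, and the persisted view (allPersistedPolicies), where it stays
DISABLED unless enabled explicitly. A hedged sketch of the startup path,
assembled from the removed lines above rather than the literal patch code:

  if (!defaultPolicyName.isEmpty()) {
    final ErasureCodingPolicyInfo info = policiesByName.get(defaultPolicyName);
    if (info == null) {
      throw new HadoopIllegalArgumentException("EC policy '" + defaultPolicyName
          + "' specified at " + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY
          + " is not a valid policy");
    }
    // Enable in memory only; the persisted copy is left untouched so the
    // fsimage still records the policy as disabled.
    info.setState(ErasureCodingPolicyState.ENABLED);
    enabledPoliciesByName.put(defaultPolicyName, info.getPolicy());
  }
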
[28/50] [abbrv] hadoop git commit: YARN-8902. [CSI] Add volume manager that manages CSI volume lifecycle. Contributed by Weiwei Yang.

2018-11-12 Thread botong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e728444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
new file mode 100644
index 000..f275768
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.volume.csi.processor;
+
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.VolumeManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.lifecycle.Volume;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.lifecycle.VolumeImpl;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.provisioner.VolumeProvisioningResults;
+import 
org.apache.hadoop.yarn.server.resourcemanager.volume.csi.provisioner.VolumeProvisioningTask;
+import org.apache.hadoop.yarn.server.volume.csi.VolumeMetaData;
+import 
org.apache.hadoop.yarn.server.volume.csi.exception.InvalidVolumeException;
+import org.apache.hadoop.yarn.server.volume.csi.exception.VolumeException;
+import 
org.apache.hadoop.yarn.server.volume.csi.exception.VolumeProvisioningException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * AMS processor that handles volume resource requests.
+ *
+ */
+public class VolumeAMSProcessor implements ApplicationMasterServiceProcessor {
+
+  private static final Logger LOG =  LoggerFactory
+  .getLogger(VolumeAMSProcessor.class);
+
+  private ApplicationMasterServiceProcessor nextAMSProcessor;
+  private VolumeManager volumeManager;
+
+  @Override
+  public void init(ApplicationMasterServiceContext amsContext,
+  ApplicationMasterServiceProcessor nextProcessor) {
+LOG.info("Initializing CSI volume processor");
+this.nextAMSProcessor = nextProcessor;
+this.volumeManager = ((RMContext) amsContext).getVolumeManager();
+  }
+
+  @Override
+  public void registerApplicationMaster(
+  ApplicationAttemptId applicationAttemptId,
+  RegisterApplicationMasterRequest request,
+  RegisterApplicationMasterResponse response)
+  throws IOException, YarnException {
+this.nextAMSProcessor.registerApplicationMaster(applicationAttemptId,
+request, response);
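
The init() above is the essential wiring: AMS processors form a chain of responsibility, and each implementation must delegate to the next processor or the default scheduler path at the tail never runs. A self-contained sketch of that shape with a hypothetical processor (only the delegation pattern comes from the patch; the volume-specific allocate logic is truncated in this excerpt):

  // Hypothetical processor; imports match those in the new file above.
  public class AuditingAMSProcessor
      implements ApplicationMasterServiceProcessor {

    private ApplicationMasterServiceProcessor next;

    @Override
    public void init(ApplicationMasterServiceContext amsContext,
        ApplicationMasterServiceProcessor nextProcessor) {
      this.next = nextProcessor;
    }

    @Override
    public void registerApplicationMaster(ApplicationAttemptId attemptId,
        RegisterApplicationMasterRequest request,
        RegisterApplicationMasterResponse response)
        throws IOException, YarnException {
      // pre-registration hook goes here
      next.registerApplicationMaster(attemptId, request, response);
    }

    @Override
    public void allocate(ApplicationAttemptId attemptId,
        AllocateRequest request, AllocateResponse response)
        throws YarnException {
      // VolumeAMSProcessor inspects SchedulingRequests here before delegating
      next.allocate(attemptId, request, response);
    }

    @Override
    public void finishApplicationMaster(ApplicationAttemptId attemptId,
        FinishApplicationMasterRequest request,
        FinishApplicationMasterResponse response) {
      next.finishApplicationMaster(attemptId, request, response);
    }
  }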

[05/50] [abbrv] hadoop git commit: HADOOP-15907. Add missing maven modules in BUILDING.txt. Contributed Wanqiang Ji.

2018-11-12 Thread botong
HADOOP-15907. Add missing maven modules in BUILDING.txt. Contributed Wanqiang 
Ji.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6444f1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6444f1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6444f1c

Branch: refs/heads/YARN-7402
Commit: e6444f1c640cda86e1c97fbfebf68de92a162c95
Parents: addec29
Author: Weiwei Yang 
Authored: Wed Nov 7 16:45:16 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Nov 7 16:45:16 2018 +0800

--
 BUILDING.txt | 30 +++---
 1 file changed, 19 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6444f1c/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9727004..d35e3af 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -93,17 +93,25 @@ Optional packages:
 
--
 Maven main modules:
 
-  hadoop(Main Hadoop project)
- - hadoop-project   (Parent POM for all Hadoop Maven modules.  
   )
-(All plugins & dependencies versions are 
defined here.)
- - hadoop-project-dist  (Parent POM for modules that generate 
distributions.)
- - hadoop-annotations   (Generates the Hadoop doclet used to 
generated the Javadocs)
- - hadoop-assemblies(Maven assemblies used by the different 
modules)
- - hadoop-common-project(Hadoop Common)
- - hadoop-hdfs-project  (Hadoop HDFS)
- - hadoop-mapreduce-project (Hadoop MapReduce)
- - hadoop-tools (Hadoop tools like Streaming, Distcp, etc.)
- - hadoop-dist  (Hadoop distribution assembler)
+  hadoop(Main Hadoop project)
+ - hadoop-project   (Parent POM for all Hadoop Maven 
modules. )
+(All plugins & dependencies versions 
are defined here.)
+ - hadoop-project-dist  (Parent POM for modules that generate 
distributions.)
+ - hadoop-annotations   (Generates the Hadoop doclet used to 
generated the Javadocs)
+ - hadoop-assemblies(Maven assemblies used by the 
different modules)
+ - hadoop-maven-plugins (Maven plugins used in project)
+ - hadoop-build-tools   (Build tools like checkstyle, etc.)
+ - hadoop-common-project(Hadoop Common)
+ - hadoop-hdfs-project  (Hadoop HDFS)
+ - hadoop-yarn-project  (Hadoop YARN)
+ - hadoop-mapreduce-project (Hadoop MapReduce)
+ - hadoop-ozone (Hadoop Ozone)
+ - hadoop-hdds  (Hadoop Distributed Data Store)
+ - hadoop-tools (Hadoop tools like Streaming, Distcp, 
etc.)
+ - hadoop-dist  (Hadoop distribution assembler)
+ - hadoop-client-modules(Hadoop client modules)
+ - hadoop-minicluster   (Hadoop minicluster artifacts)
+ - hadoop-cloud-storage-project (Generates artifacts to access cloud 
storage like aws, azure, etc.)
 
 
--
 Where to run Maven from?





[39/50] [abbrv] hadoop git commit: Updating GPG module pom version post rebase.

2018-11-12 Thread botong
Updating GPG module pom version post rebase.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2488fad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2488fad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2488fad

Branch: refs/heads/YARN-7402
Commit: d2488fad237343d3dc1982876a6b6736027c40c6
Parents: 8aec1a5
Author: Subru Krishnan 
Authored: Wed May 30 12:59:22 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../hadoop-yarn-server-globalpolicygenerator/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2488fad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index 9398b0b..c137c9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -19,12 +19,12 @@
   
 hadoop-yarn-server
 org.apache.hadoop
-3.1.0-SNAPSHOT
+3.2.0-SNAPSHOT
   
   4.0.0
   org.apache.hadoop
   hadoop-yarn-server-globalpolicygenerator
-  3.1.0-SNAPSHOT
+  3.2.0-SNAPSHOT
   hadoop-yarn-server-globalpolicygenerator
 
   





[49/50] [abbrv] hadoop git commit: YARN-8862. [GPG] Add Yarn Registry cleanup in ApplicationCleaner. Contributed by Botong Huang.

2018-11-12 Thread botong
YARN-8862. [GPG] Add Yarn Registry cleanup in ApplicationCleaner. Contributed 
by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/727c3f80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/727c3f80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/727c3f80

Branch: refs/heads/YARN-7402
Commit: 727c3f8079fe586cbd9c1f8aecdcf40d8ddbe66d
Parents: 1facc8a
Author: Botong Huang 
Authored: Thu Oct 18 10:26:16 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:11:53 2018 -0800

--
 .../utils/FederationRegistryClient.java | 18 +++
 .../utils/TestFederationRegistryClient.java | 31 ++-
 .../globalpolicygenerator/GPGContext.java   |  5 +++
 .../globalpolicygenerator/GPGContextImpl.java   | 12 
 .../GlobalPolicyGenerator.java  | 21 +
 .../applicationcleaner/ApplicationCleaner.java  | 19 +++-
 .../DefaultApplicationCleaner.java  |  2 ++
 .../TestDefaultApplicationCleaner.java  | 32 
 .../amrmproxy/FederationInterceptor.java|  6 ++--
 9 files changed, 136 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/727c3f80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
index 13545c9..aa384c1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java
@@ -202,21 +202,27 @@ public class FederationRegistryClient {
* Remove an application from registry.
*
* @param appId application id
+   * @param ignoreMemoryState whether to ignore the in-memory data about
+   *  known applications
*/
-  public synchronized void removeAppFromRegistry(ApplicationId appId) {
+  public synchronized void removeAppFromRegistry(ApplicationId appId,
+  boolean ignoreMemoryState) {
 Map> subClusterTokenMap =
 this.appSubClusterTokenMap.get(appId);
-LOG.info("Removing all registry entries for {}", appId);
-
-if (subClusterTokenMap == null || subClusterTokenMap.size() == 0) {
-  return;
+if (!ignoreMemoryState) {
+  if (subClusterTokenMap == null || subClusterTokenMap.size() == 0) {
+return;
+  }
 }
+LOG.info("Removing all registry entries for {}", appId);
 
 // Lastly remove the application directory
 String key = getRegistryKey(appId, null);
 try {
   removeKeyRegistry(this.registry, this.user, key, true, true);
-  subClusterTokenMap.clear();
+  if (subClusterTokenMap != null) {
+subClusterTokenMap.clear();
+  }
 } catch (YarnException e) {
   LOG.error("Failed removing registry directory key " + key, e);
 }
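
A plausible usage sketch of the new flag at its two call sites, inferred from the changed-file list above (the GPG ApplicationCleaner and the AMRMProxy FederationInterceptor); the exact call lines are not in this excerpt:

  // ApplicationCleaner (GPG): it keeps no in-memory token map, so the
  // registry is the source of truth and cleanup is forced.
  registryClient.removeAppFromRegistry(appId, true);

  // FederationInterceptor (AMRMProxy): keep the old short-circuit and skip
  // applications it does not know about.
  registryClient.removeAppFromRegistry(appId, false);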

http://git-wip-us.apache.org/repos/asf/hadoop/blob/727c3f80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
index 42be851..5b799a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationRegistryClient.java
@@ -80,11 +80,40 @@ public class TestFederationRegistryClient {
 Assert.assertEquals(2,
 this.registryClient.loadStateFromRegistry(appId).size());
 
-this.registryClient.removeAppFromRegistry(appId);
+

[18/50] [abbrv] hadoop git commit: YARN-8990. Fix fair scheduler race condition in app submit and queue cleanup. (Contributed by Wilfred Spiegelenburg)

2018-11-12 Thread botong
YARN-8990. Fix fair scheduler race condition in app submit and queue cleanup. 
(Contributed by Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524a7523
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524a7523
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524a7523

Branch: refs/heads/YARN-7402
Commit: 524a7523c427b55273133078898ae3535897bada
Parents: 89b4916
Author: Haibo Chen 
Authored: Thu Nov 8 16:02:48 2018 -0800
Committer: Haibo Chen 
Committed: Thu Nov 8 16:02:48 2018 -0800

--
 .../scheduler/fair/FSLeafQueue.java |  14 +++
 .../scheduler/fair/FairScheduler.java   |  19 +++-
 .../scheduler/fair/QueueManager.java| 113 +--
 3 files changed, 104 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/524a7523/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 7e4dab8..a038887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -651,4 +651,18 @@ public class FSLeafQueue extends FSQueue {
   writeLock.unlock();
 }
   }
+
+  /**
+   * This method is called when an application is removed from this queue
+   * during the submit process.
+   * @param applicationId the application's id
+   */
+  public void removeAssignedApp(ApplicationId applicationId) {
+writeLock.lock();
+try {
+  assignedApps.remove(applicationId);
+} finally {
+  writeLock.unlock();
+}
+  }
 }
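
The race, as the summary and the hunks suggest: the app is now recorded in the queue's assigned set when the queue is chosen (inside assignToQueue) rather than at the end of submission, so queue cleanup cannot remove a dynamic queue mid-submit; in exchange, every rejection path must undo that bookkeeping, which is what removeAssignedApp above is for. A simplified, self-contained sketch of the guarded-set pattern (stand-in types, not the scheduler's real fields):

  import java.util.HashSet;
  import java.util.Set;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  // Simplified stand-in for FSLeafQueue's assigned-app bookkeeping.
  class AssignedApps {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final Set<String> assigned = new HashSet<>();

    void add(String appId) {            // recorded at queue-selection time
      lock.writeLock().lock();
      try {
        assigned.add(appId);
      } finally {
        lock.writeLock().unlock();
      }
    }

    void remove(String appId) {         // called on every rejection path
      lock.writeLock().lock();
      try {
        assigned.remove(appId);
      } finally {
        lock.writeLock().unlock();
      }
    }

    boolean isEmpty() {                 // queue-cleanup check reads safely
      lock.readLock().lock();
      try {
        return assigned.isEmpty();
      } finally {
        lock.readLock().unlock();
      }
    }
  }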

http://git-wip-us.apache.org/repos/asf/hadoop/blob/524a7523/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index da5e4c9..e5d2a06 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -473,7 +473,7 @@ public class FairScheduler extends
 writeLock.lock();
 try {
   RMApp rmApp = rmContext.getRMApps().get(applicationId);
-  FSLeafQueue queue = assignToQueue(rmApp, queueName, user);
+  FSLeafQueue queue = assignToQueue(rmApp, queueName, user, applicationId);
   if (queue == null) {
 return;
   }
@@ -499,6 +499,7 @@ public class FairScheduler extends
   applicationId, queue.getName(),
   invalidAMResourceRequests, queue.getMaxShare());
   rejectApplicationWithMessage(applicationId, msg);
+  queue.removeAssignedApp(applicationId);
   return;
 }
   }
@@ -513,6 +514,7 @@ public class FairScheduler extends
 + " cannot submit applications to queue " + queue.getName()
 + "(requested queuename is " + queueName + ")";
 rejectApplicationWithMessage(applicationId, msg);
+queue.removeAssignedApp(applicationId);
 return;
   }
 
@@ -520,7 +522,6 @@ public class FairScheduler extends
   new SchedulerApplication(queue, user);
   applications.put(applicationId, application);
   queue.getMetrics().submitApp(user);
-  queue.addAssignedApp(applicationId);
 
   LOG.info("Accepted application " + applicationId + " from user: " + user
   + ", in 

[26/50] [abbrv] hadoop git commit: YARN-9002. Improve keytab loading for YARN Service. Contributed by Gour Saha

2018-11-12 Thread botong
YARN-9002.  Improve keytab loading for YARN Service.
Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26642487
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26642487
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26642487

Branch: refs/heads/YARN-7402
Commit: 2664248797365761089a86d5bd59aa9ac3ebcc28
Parents: 298d250
Author: Eric Yang 
Authored: Sat Nov 10 01:52:19 2018 -0500
Committer: Eric Yang 
Committed: Sat Nov 10 01:52:19 2018 -0500

--
 .../yarn/service/client/ServiceClient.java  | 38 
 .../exceptions/RestApiErrorMessages.java|  2 --
 .../yarn/service/utils/ServiceApiUtil.java  | 17 -
 .../yarn/service/utils/TestServiceApiUtil.java  | 25 +++--
 4 files changed, 19 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26642487/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 91d6367..1158e44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1392,31 +1392,21 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
   throw new YarnException(e);
 }
 
-if (keytabURI.getScheme() != null) {
-  switch (keytabURI.getScheme()) {
-  case "hdfs":
-Path keytabOnhdfs = new Path(keytabURI);
-if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
-  LOG.warn(service.getName() + "'s keytab (principalName = "
-  + principalName + ") doesn't exist at: " + keytabOnhdfs);
-  return;
-}
-LocalResource keytabRes = fileSystem.createAmResource(keytabOnhdfs,
-LocalResourceType.FILE);
-localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
-service.getName()), keytabRes);
-LOG.info("Adding " + service.getName() + "'s keytab for "
-+ "localization, uri = " + keytabOnhdfs);
-break;
-  case "file":
-LOG.info("Using a keytab from localhost: " + keytabURI);
-break;
-  default:
-LOG.warn("Unsupported keytab URI scheme " + keytabURI);
-break;
-  }
+if ("file".equals(keytabURI.getScheme())) {
+  LOG.info("Using a keytab from localhost: " + keytabURI);
 } else {
-  LOG.warn("Unsupported keytab URI scheme " + keytabURI);
+  Path keytabOnhdfs = new Path(keytabURI);
+  if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
+LOG.warn(service.getName() + "'s keytab (principalName = "
++ principalName + ") doesn't exist at: " + keytabOnhdfs);
+return;
+  }
+  LocalResource keytabRes = fileSystem.createAmResource(keytabOnhdfs,
+  LocalResourceType.FILE);
+  localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
+  service.getName()), keytabRes);
+  LOG.info("Adding " + service.getName() + "'s keytab for "
+  + "localization, uri = " + keytabOnhdfs);
 }
   }
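
The net effect of the rewrite: only the "file" scheme is special-cased now, and every other scheme, not just "hdfs", is handed to the Hadoop filesystem layer. A hedged fragment showing the broadened dispatch (the s3a URI is hypothetical):

  URI keytabURI = URI.create("s3a://my-bucket/keytabs/myservice.keytab");

  if ("file".equals(keytabURI.getScheme())) {
    // keytab is local to the host running the AM; nothing to localize
  } else {
    // any Hadoop-compatible filesystem (hdfs://, s3a://, abfs://, ...)
    Path keytabPath = new Path(keytabURI);
    // existence check and AM localization happen here, as in the patch
  }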
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26642487/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 8f831ee..57c6449 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 

[01/50] [abbrv] hadoop git commit: MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. Contributed by Peter Bacsko [Forced Update!]

2018-11-12 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 1ca57be32 -> e1017a676 (forced update)


MAPREDUCE-7156. NullPointerException when reaching max shuffle connections. 
Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba1f9d66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba1f9d66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba1f9d66

Branch: refs/heads/YARN-7402
Commit: ba1f9d66d94ed0b85084d7c40c09a87478b3a05a
Parents: 08d69d9
Author: Jason Lowe 
Authored: Tue Nov 6 17:55:51 2018 -0600
Committer: Jason Lowe 
Committed: Tue Nov 6 17:55:51 2018 -0600

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1f9d66/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index aeda9cc..c222685 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -910,6 +910,8 @@ public class ShuffleHandler extends AuxiliaryService {
 @Override
 public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt) 
 throws Exception {
+  super.channelOpen(ctx, evt);
+
   if ((maxShuffleConnections > 0) && (accepted.size() >= 
maxShuffleConnections)) {
 LOG.info(String.format("Current number of shuffle connections (%d) is 
" + 
 "greater than or equal to the max allowed shuffle connections 
(%d)", 
@@ -925,8 +927,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
   accepted.add(evt.getChannel());
-  super.channelOpen(ctx, evt);
- 
 }
 
 @Override
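
The entire fix is the reordering: a channel rejected over the connection cap previously returned before super.channelOpen() ran, which the JIRA title ties to the NullPointerException seen downstream. A small self-contained sketch of the corrected ordering, in generic Java rather than Netty's API:

  import java.util.Set;
  import java.util.concurrent.ConcurrentHashMap;

  class ConnectionLimiter {
    private final int maxConnections;                  // 0 means unlimited
    private final Set<AutoCloseable> accepted = ConcurrentHashMap.newKeySet();

    ConnectionLimiter(int maxConnections) {
      this.maxConnections = maxConnections;
    }

    void onOpen(AutoCloseable channel) throws Exception {
      registerUpstream(channel); // always first, like super.channelOpen()
      if (maxConnections > 0 && accepted.size() >= maxConnections) {
        channel.close();         // reject only after registration completed
        return;
      }
      accepted.add(channel);
    }

    private void registerUpstream(AutoCloseable channel) {
      // base-class bookkeeping the original bug skipped for rejected channels
    }
  }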





[04/50] [abbrv] hadoop git commit: HDDS-809. Refactor SCMChillModeManager.

2018-11-12 Thread botong
HDDS-809. Refactor SCMChillModeManager.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/addec292
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/addec292
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/addec292

Branch: refs/heads/YARN-7402
Commit: addec29297e61a417f0ce711bd76b6db53d504eb
Parents: 482716e
Author: Yiqun Lin 
Authored: Wed Nov 7 13:53:28 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Nov 7 13:54:08 2018 +0800

--
 .../org/apache/hadoop/hdds/scm/ScmUtils.java|   2 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   2 +-
 .../hdds/scm/chillmode/ChillModeExitRule.java   |  32 ++
 .../hdds/scm/chillmode/ChillModePrecheck.java   |  68 
 .../scm/chillmode/ChillModeRestrictedOps.java   |  41 +++
 .../scm/chillmode/ContainerChillModeRule.java   | 112 +++
 .../scm/chillmode/DataNodeChillModeRule.java|  83 +
 .../hadoop/hdds/scm/chillmode/Precheck.java |  29 ++
 .../hdds/scm/chillmode/SCMChillModeManager.java | 153 +
 .../hadoop/hdds/scm/chillmode/package-info.java |  18 ++
 .../hdds/scm/server/ChillModePrecheck.java  |  69 
 .../apache/hadoop/hdds/scm/server/Precheck.java |  29 --
 .../hdds/scm/server/SCMChillModeManager.java| 319 ---
 .../scm/server/SCMClientProtocolServer.java |   1 +
 .../scm/server/StorageContainerManager.java |   1 +
 .../scm/chillmode/TestSCMChillModeManager.java  | 215 +
 .../scm/server/TestSCMChillModeManager.java | 215 -
 .../hadoop/ozone/om/TestScmChillMode.java   |   2 +-
 18 files changed, 756 insertions(+), 635 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/addec292/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
index 435f0a5..43b4452 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
+import org.apache.hadoop.hdds.scm.chillmode.Precheck;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.server.Precheck;
 
 /**
  * SCM utility class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/addec292/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index c878d97..85658b9 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.chillmode.ChillModePrecheck;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/addec292/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
new file mode 100644
index 000..d283dfe
--- /dev/null
+++ 
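
HDDS-809 is largely a package move (hdds.scm.server to hdds.scm.chillmode), visible in the import swaps above. The pattern being consolidated: SCM operations are validated against chill-mode prechecks before they execute. A hedged stand-in for the relocated Precheck/ChillModePrecheck pair (the real interfaces are truncated in this excerpt, so the shapes below are illustrative, not the actual API):

  import java.util.Arrays;
  import java.util.HashSet;
  import java.util.Set;

  interface Precheck<O> {
    void check(O op);                   // throw if the op is restricted
  }

  class ChillModePrecheck implements Precheck<String> {
    private volatile boolean inChillMode = true;
    private final Set<String> restrictedOps =
        new HashSet<>(Arrays.asList("allocateBlock", "allocateContainer"));

    @Override
    public void check(String op) {
      if (inChillMode && restrictedOps.contains(op)) {
        throw new IllegalStateException(
            "SCM in chill mode, rejecting restricted op: " + op);
      }
    }

    void exitChillMode() {              // flipped once exit rules are met
      inChillMode = false;
    }
  }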

[43/50] [abbrv] hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)

2018-11-12 Thread botong
YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eac28b59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eac28b59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eac28b59

Branch: refs/heads/YARN-7402
Commit: eac28b599575be5291f89faa46fa081b3f7b0676
Parents: af0c804
Author: Botong Huang 
Authored: Thu Feb 1 14:43:48 2018 -0800
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../dev-support/findbugs-exclude.xml|   5 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  18 +++
 .../src/main/resources/yarn-default.xml |  24 
 .../store/impl/MemoryFederationStateStore.java  |  13 ++
 .../utils/FederationStateStoreFacade.java   |  41 ++-
 .../GlobalPolicyGenerator.java  |  92 ++-
 .../subclustercleaner/SubClusterCleaner.java| 109 +
 .../subclustercleaner/package-info.java |  19 +++
 .../TestSubClusterCleaner.java  | 118 +++
 9 files changed, 409 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eac28b59/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index dd42129..507e116 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -387,6 +387,11 @@
 
 
   
+  
+
+
+
+  
  
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eac28b59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e88d594..0b35b9f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3427,6 +3427,24 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
   false;
 
+  private static final String FEDERATION_GPG_PREFIX =
+  FEDERATION_PREFIX + "gpg.";
+
+  // The number of threads to use for the GPG scheduled executor service
+  public static final String GPG_SCHEDULED_EXECUTOR_THREADS =
+  FEDERATION_GPG_PREFIX + "scheduled.executor.threads";
+  public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10;
+
+  // The interval at which the subcluster cleaner runs, -1 means disabled
+  public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS =
+  FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1;
+
+  // The expiration time for a subcluster heartbeat, default is 30 minutes
+  public static final String GPG_SUBCLUSTER_EXPIRATION_MS =
+  FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180;
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eac28b59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index f5493bc..4083ff0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3647,6 +3647,30 @@
 
   
 
+  <property>
+    <description>The number of threads to use for the GPG scheduled executor service.</description>
+    <name>yarn.federation.gpg.scheduled.executor.threads</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>The interval at which the subcluster cleaner runs, -1 means disabled.</description>
+    <name>yarn.federation.gpg.subcluster.cleaner.interval-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>The expiration time for a subcluster heartbeat, default is 30 minutes.</description>

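A hedged wiring sketch for the knobs above: GPG runs periodic services on a shared scheduled executor, and a non-positive cleaner interval leaves the cleaner disabled. The constants are the ones added to YarnConfiguration in this commit; the SubClusterCleaner constructor arguments are an assumption:

  // Fragment; assumes a Configuration 'conf' and a GPGContext 'gpgContext'.
  ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(
      conf.getInt(YarnConfiguration.GPG_SCHEDULED_EXECUTOR_THREADS,
          YarnConfiguration.DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS));

  long intervalMs = conf.getLong(
      YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS,
      YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS);

  if (intervalMs > 0) {  // the shipped default of -1 keeps the cleaner off
    executor.scheduleAtFixedRate(
        new SubClusterCleaner(conf, gpgContext),  // assumed constructor
        0, intervalMs, TimeUnit.MILLISECONDS);
  }
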
[37/50] [abbrv] hadoop git commit: HDDS-709. Modify Close Container handling sequence on datanodes. Contributed by Shashikant Banerjee.

2018-11-12 Thread botong
HDDS-709. Modify Close Container handling sequence on datanodes. Contributed by 
Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f944f338
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f944f338
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f944f338

Branch: refs/heads/YARN-7402
Commit: f944f3383246450a1aa2b34f55f99a9e86e10c42
Parents: 1f9c4f3
Author: Jitendra Pandey 
Authored: Mon Nov 12 14:08:39 2018 -0800
Committer: Jitendra Pandey 
Committed: Mon Nov 12 14:08:39 2018 -0800

--
 .../helpers/ContainerNotOpenException.java  |  36 +++
 .../helpers/InvalidContainerStateException.java |  35 ++
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../container/common/impl/HddsDispatcher.java   | 106 ---
 .../common/interfaces/ContainerDispatcher.java  |  10 ++
 .../CloseContainerCommandHandler.java   |  28 +++--
 .../server/ratis/ContainerStateMachine.java |  11 ++
 .../container/keyvalue/KeyValueHandler.java |  33 +++---
 .../ozone/client/io/ChunkGroupOutputStream.java |  14 ++-
 .../rpc/TestCloseContainerHandlingByClient.java |   2 +-
 .../rpc/TestContainerStateMachineFailures.java  |   6 +-
 .../transport/server/ratis/TestCSMMetrics.java  |   6 ++
 .../container/server/TestContainerServer.java   |   6 ++
 13 files changed, 255 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f944f338/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
new file mode 100644
index 000..4e406e6
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+
+/**
+ * Exceptions thrown when a write/update operation is done on a non-open
+ * container.
+ */
+public class ContainerNotOpenException extends StorageContainerException {
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   */
+  public ContainerNotOpenException(String message) {
+super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN);
+  }
+}
\ No newline at end of file
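
A hedged usage fragment for the new exception: it lets the datanode report CONTAINER_NOT_OPEN specifically, so clients can tell a closed container apart from a genuine failure. The guard below is illustrative, not copied from HddsDispatcher (which this excerpt truncates):

  // Illustrative write-path guard; 'isWriteRequest' and 'container' are
  // hypothetical helpers standing in for the dispatcher's real checks.
  if (isWriteRequest(msg) && container.getContainerState() != OPEN) {
    throw new ContainerNotOpenException(
        "Container " + containerId + " in " + container.getContainerState()
            + " state, rejecting " + msg.getCmdType());
  }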

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f944f338/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
new file mode 100644
index 000..1378d1a
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *

[33/50] [abbrv] hadoop git commit: YARN-8877. [CSI] Extend service spec to allow setting resource attributes. Contributed by Weiwei Yang.

2018-11-12 Thread botong
YARN-8877. [CSI] Extend service spec to allow setting resource attributes. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42f3a708
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42f3a708
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42f3a708

Branch: refs/heads/YARN-7402
Commit: 42f3a7082a90bc71f0e86dc1e50b0c77b05489cf
Parents: 3c9d97b
Author: Sunil G 
Authored: Mon Nov 12 21:09:30 2018 +0530
Committer: Sunil G 
Committed: Mon Nov 12 21:09:30 2018 +0530

--
 .../yarn/api/records/ResourceInformation.java   |  7 +++
 .../api/records/ResourceInformation.java| 18 
 .../yarn/service/component/Component.java   |  3 +-
 .../hadoop/yarn/service/TestServiceAM.java  | 47 
 .../yarn/service/conf/ExampleAppJson.java   |  1 +
 .../yarn/service/conf/TestAppJsonResolve.java   | 18 
 .../yarn/service/conf/examples/external3.json   | 26 +++
 .../markdown/yarn-service/YarnServiceAPI.md |  2 +-
 8 files changed, 120 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42f3a708/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 057e94e..047c09a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -276,6 +276,13 @@ public class ResourceInformation implements Comparable<ResourceInformation> {
   }
 
   public static ResourceInformation newInstance(String name, String units,
+  long value, Map<String, String> attributes) {
+return ResourceInformation
+.newInstance(name, units, value, ResourceTypes.COUNTABLE, 0L,
+Long.MAX_VALUE, null, attributes);
+  }
+
+  public static ResourceInformation newInstance(String name, String units,
   ResourceTypes resourceType) {
 return ResourceInformation.newInstance(name, units, 0L, resourceType, 0L,
 Long.MAX_VALUE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42f3a708/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
index 103fffb..e466ce7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.yarn.service.api.records;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableMap;
 import com.google.gson.annotations.SerializedName;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 
+import java.util.Map;
 import java.util.Objects;
 
 /**
@@ -35,11 +37,25 @@ public class ResourceInformation {
   @SerializedName("unit")
   private String unit = null;
 
+  @SerializedName("attributes")
+  private Map<String, String> attributes = null;
+
   public ResourceInformation value(Long value) {
 this.value = value;
 return this;
   }
 
+  @ApiModelProperty(value = "")
+  @JsonProperty("attributes")
+  public Map<String, String> getAttributes() {
+return attributes == null ? ImmutableMap.of() : attributes;
+  }
+
+  public ResourceInformation attributes(Map<String, String> attributes) {
+this.attributes = attributes;
+return this;
+  }
+
   /**
* Integer value of the resource.
*
@@ -98,6 +114,8 @@ public class ResourceInformation {
 sb.append("class ResourceInformation {\n");
 sb.append("value: ").append(toIndentedString(value)).append("\n");
 sb.append("unit: 
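
A hedged usage fragment for the new overload (the signature is from the yarn.api.records hunk above; the resource name and attribute keys are illustrative):

  Map<String, String> attributes =
      ImmutableMap.of("driver", "csi-example", "mount", "/data");

  // yarn.api.records.ResourceInformation: countable resource plus attributes
  ResourceInformation volume = ResourceInformation.newInstance(
      "yarn.io/example-volume", "Mi", 1024L, attributes);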

[45/50] [abbrv] hadoop git commit: YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)

2018-11-12 Thread botong
YARN-3660. [GPG] Federation Global Policy Generator (service hook only). 
(Contributed by Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af0c804b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af0c804b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af0c804b

Branch: refs/heads/YARN-7402
Commit: af0c804b9f1c93a0334674a368cae3f19e6bb2b8
Parents: e269c3f
Author: Carlo Curino 
Authored: Thu Jan 18 17:21:06 2018 -0800
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 hadoop-project/pom.xml  |   6 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   5 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|  55 +---
 .../hadoop-yarn/conf/yarn-env.sh|  12 ++
 .../pom.xml |  98 +
 .../globalpolicygenerator/GPGContext.java   |  31 +
 .../globalpolicygenerator/GPGContextImpl.java   |  41 ++
 .../GlobalPolicyGenerator.java  | 136 +++
 .../globalpolicygenerator/package-info.java |  19 +++
 .../TestGlobalPolicyGenerator.java  |  38 ++
 .../hadoop-yarn/hadoop-yarn-server/pom.xml  |   1 +
 hadoop-yarn-project/pom.xml |   4 +
 12 files changed, 424 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af0c804b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c985d7b..6a5e0c3 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -453,6 +453,12 @@
 
   
      <dependency>
        <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+       <version>${project.version}</version>
+     </dependency>
+
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-yarn-services-core</artifactId>
        <version>${hadoop.version}</version>
      </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af0c804b/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 3ec0311..0c320dc 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -39,6 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "container" client "prints container(s) report"
   hadoop_add_subcommand "daemonlog" admin "get/set the log level for each 
daemon"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
+  hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy 
Generator"
   hadoop_add_subcommand "jar " client "run a jar file"
   hadoop_add_subcommand "logs" client "dump container logs"
   hadoop_add_subcommand "node" admin "prints node report(s)"
@@ -104,6 +105,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
   echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
   exit 0
 ;;
+globalpolicygenerator)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  
HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator'
+;;
 jar)
   HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af0c804b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index e1ac112..bebfd71 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -134,6 +134,10 @@ if "%1" == "--loglevel" (
 set 
CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes
   )
 
+  if exist 
%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes 
(
+set 
CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes
+  )
+
   if exist %HADOOP_YARN_HOME%\build\test\classes (
 set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
   )
@@ -155,7 +159,7 @@ if "%1" == "--loglevel" (
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar 
^
  application applicationattempt container node queue logs daemonlog 
historyserver ^
- timelineserver timelinereader router classpath
+ timelineserver timelinereader router globalpolicygenerator classpath
   for %%i in ( %yarncommands% ) do (
 if %yarn-command% == %%i set yarncommand=true
   )
@@ -259,7 +263,13 @@ goto :eof
 :router
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.router.Router
- 
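
Since this commit is the "service hook only", the new daemon reduces to a standard Hadoop CompositeService plus the launcher plumbing above ('yarn globalpolicygenerator'). A hedged skeleton of that shape, using the real class name with an illustrative body:

  public class GlobalPolicyGenerator extends CompositeService {

    public GlobalPolicyGenerator() {
      super(GlobalPolicyGenerator.class.getName());
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
      // sub-services (policy generators, cleaners) get addService(...) here
      super.serviceInit(conf);
    }

    public static void main(String[] args) {
      GlobalPolicyGenerator gpg = new GlobalPolicyGenerator();
      gpg.init(new YarnConfiguration());
      gpg.start();  // 'yarn globalpolicygenerator' lands here via the launcher
    }
  }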

[16/50] [abbrv] hadoop git commit: HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz Wo Nicholas Sze and Mukul Kumar Singh.

2018-11-12 Thread botong
HDDS-806. Update Ratis to latest snapshot version in ozone. Contributed by Tsz 
Wo Nicholas Sze and Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31614bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31614bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31614bcc

Branch: refs/heads/YARN-7402
Commit: 31614bcc7cda614c45769aa779a839b25c375db2
Parents: 8d99648
Author: Shashikant Banerjee 
Authored: Fri Nov 9 00:05:45 2018 +0530
Committer: Shashikant Banerjee 
Committed: Fri Nov 9 00:05:45 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  8 
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 10 
 .../common/src/main/resources/ozone-default.xml | 15 ++
 .../server/ratis/ContainerStateMachine.java | 48 +---
 .../server/ratis/XceiverServerRatis.java| 11 +
 hadoop-hdds/pom.xml |  2 +-
 hadoop-ozone/pom.xml|  2 +-
 7 files changed, 67 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 38eec61..cedcc43 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -85,6 +85,14 @@ public final class ScmConfigKeys {
   public static final TimeDuration
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
   TimeDuration.valueOf(10, TimeUnit.SECONDS);
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  "dfs.container.ratis.statemachinedata.sync.retries";
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  "dfs.container.ratis.log.queue.size";
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 54b1cf8..9776817 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -261,6 +261,16 @@ public final class OzoneConfigKeys {
   public static final TimeDuration
   DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
   ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
+  public static final int
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE;
+  public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT;
   public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
   ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31614bcc/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5ff60eb..2ffc2ab 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -60,6 +60,21 @@
 
   
   
+  <property>
+    <name>dfs.container.ratis.statemachinedata.sync.retries</name>
+    <value>-1</value>
+    <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
+    <description>Number of times the WriteStateMachineData op will be tried
+      before failing, if this value is -1, then this retries indefinitely.
+    </description>
+  </property>

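A hedged fragment showing how server code would read the two new knobs; the constant names and defaults are exactly the ones added to ScmConfigKeys and OzoneConfigKeys above, and OzoneConfiguration is the stock HDDS config class:

  OzoneConfiguration conf = new OzoneConfiguration();

  int syncRetries = conf.getInt(
      OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
      OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT);

  int logQueueSize = conf.getInt(
      OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE,
      OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT);
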
[40/50] [abbrv] hadoop git commit: fix build after rebase

2018-11-12 Thread botong
fix build after rebase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a75812e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a75812e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a75812e

Branch: refs/heads/YARN-7402
Commit: 6a75812e75f935382e4c5e260d3d7bf12cc98fc3
Parents: d2488fa
Author: Botong Huang 
Authored: Fri Jul 13 21:29:19 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java   | 2 +-
 .../globalpolicygenerator/subclustercleaner/SubClusterCleaner.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a75812e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index 88b9f2b..1ae07f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -22,7 +22,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.service.CompositeService;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a75812e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
index dad5121..6410a6d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
@@ -21,7 +21,7 @@ package 
org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
 import java.util.Date;
 import java.util.Map;
 
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
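
The commons-lang to commons-lang3 move is drop-in for DurationFormatUtils; a one-line sanity check of the migrated call (output format is H:mm:ss.SSS per the commons-lang3 Javadoc):

  String age = DurationFormatUtils.formatDurationHMS(90000L); // "0:01:30.000"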





[12/50] [abbrv] hadoop git commit: HDDS-737. Introduce Incremental Container Report. Contributed by Nanda kumar.

2018-11-12 Thread botong
HDDS-737. Introduce Incremental Container Report.
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c80f753b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c80f753b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c80f753b

Branch: refs/heads/YARN-7402
Commit: c80f753b0e95eb722a972f836c1e4d16fd823434
Parents: e1bbf7d
Author: Nanda kumar 
Authored: Thu Nov 8 18:33:38 2018 +0530
Committer: Nanda kumar 
Committed: Thu Nov 8 18:33:38 2018 +0530

--
 .../hdds/scm/container/ContainerInfo.java   |   8 +
 .../statemachine/DatanodeStateMachine.java  |  17 +-
 .../common/statemachine/StateContext.java   |  13 +-
 .../CloseContainerCommandHandler.java   |  73 +++--
 .../states/endpoint/HeartbeatEndpointTask.java  |  12 +-
 .../StorageContainerDatanodeProtocol.proto  |   2 +-
 .../scm/command/CommandStatusReportHandler.java |   4 -
 .../container/CloseContainerEventHandler.java   |  27 --
 .../scm/container/CloseContainerWatcher.java| 101 ---
 .../hdds/scm/container/ContainerManager.java|  11 -
 .../hdds/scm/container/ContainerReplica.java|  30 +-
 .../scm/container/ContainerReportHandler.java   | 202 +
 .../IncrementalContainerReportHandler.java  |  98 +++
 .../hdds/scm/container/SCMContainerManager.java | 107 ---
 .../scm/container/states/ContainerStateMap.java |   7 -
 .../hadoop/hdds/scm/events/SCMEvents.java   |  34 +--
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   |  37 ++-
 .../hadoop/hdds/scm/node/NewNodeHandler.java|  16 +-
 .../hadoop/hdds/scm/node/NodeManager.java   |  50 +---
 .../hadoop/hdds/scm/node/NodeReportHandler.java |   2 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |  69 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java|  66 +
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |   2 +-
 .../hdds/scm/node/states/NodeStateMap.java  |  67 ++---
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  17 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   6 +-
 .../scm/server/StorageContainerManager.java |  21 +-
 .../command/TestCommandStatusReportHandler.java |  14 -
 .../hdds/scm/container/MockNodeManager.java |  66 ++---
 .../container/TestContainerReportHandler.java   |  15 +-
 .../container/TestContainerStateManager.java|   3 +
 .../scm/container/TestSCMContainerManager.java  | 107 +--
 .../replication/TestReplicationManager.java |  12 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  |  24 +-
 .../container/TestCloseContainerWatcher.java| 289 ---
 .../ozone/container/common/TestEndPoint.java|   6 +-
 .../testutils/ReplicationNodeManagerMock.java   |  55 +---
 .../TestContainerStateManagerIntegration.java   |  11 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   4 +-
 .../hdds/scm/pipeline/TestPipelineClose.java|   4 +-
 .../commandhandler/TestBlockDeletion.java   |   6 +-
 41 files changed, 558 insertions(+), 1157 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 5a9484a..edfa0f9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -158,10 +158,18 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
     return usedBytes;
   }
 
+  public void setUsedBytes(long value) {
+    usedBytes = value;
+  }
+
   public long getNumberOfKeys() {
     return numberOfKeys;
   }
 
+  public void setNumberOfKeys(long value) {
+    numberOfKeys = value;
+  }
+
   public long getDeleteTransactionId() {
     return deleteTransactionId;
   }
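
The new setters let SCM fold datanode-reported usage figures into its cached
ContainerInfo in place instead of rebuilding the object. A hypothetical
sketch (class and method names here are invented; the patch's real consumer
is the IncrementalContainerReportHandler listed in the diffstat):

import org.apache.hadoop.hdds.scm.container.ContainerInfo;

public class ContainerStatsUpdater {
  /** Apply stats from an incremental report to the cached record. */
  public void applyReportedStats(ContainerInfo cached,
      long reportedUsedBytes, long reportedKeyCount) {
    cached.setUsedBytes(reportedUsedBytes);    // setter added above
    cached.setNumberOfKeys(reportedKeyCount);  // setter added above
  }
}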

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 4768cf8..12c33ff 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java

[22/50] [abbrv] hadoop git commit: HADOOP-15916. Upgrade Maven Surefire plugin to 3.0.0-M1.

2018-11-12 Thread botong
HADOOP-15916. Upgrade Maven Surefire plugin to 3.0.0-M1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a736b5da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a736b5da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a736b5da

Branch: refs/heads/YARN-7402
Commit: a736b5da15084e8eb93d2f68f8eccc506ff7bea7
Parents: 9dbb2b6
Author: Akira Ajisaka 
Authored: Sat Nov 10 00:24:56 2018 +0900
Committer: Akira Ajisaka 
Committed: Sat Nov 10 00:24:56 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a736b5da/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5d38167..c985d7b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -127,7 +127,7 @@
     <maven-surefire-plugin.argLine>-Xmx2048m -XX:+HeapDumpOnOutOfMemoryError</maven-surefire-plugin.argLine>
-    <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
+    <maven-surefire-plugin.version>3.0.0-M1</maven-surefire-plugin.version>
     <maven-surefire-report-plugin.version>${maven-surefire-plugin.version}</maven-surefire-report-plugin.version>
     <maven-failsafe-plugin.version>${maven-surefire-plugin.version}</maven-failsafe-plugin.version>





[30/50] [abbrv] hadoop git commit: HDDS-767. OM should not search for STDOUT root logger for audit logging. Contributed by Dinesh Chitlangia.

2018-11-12 Thread botong
HDDS-767. OM should not search for STDOUT root logger for audit logging. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c32b50d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c32b50d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c32b50d

Branch: refs/heads/YARN-7402
Commit: 9c32b50d610463bb50a25bb01606ceeea8e04507
Parents: 4e72844
Author: Márton Elek 
Authored: Mon Nov 12 10:54:41 2018 +0100
Committer: Márton Elek 
Committed: Mon Nov 12 10:54:41 2018 +0100

--
 hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c32b50d/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
--
diff --git a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
index 7be51ac..57577e1 100644
--- a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
+++ b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
@@ -86,5 +86,5 @@ logger.audit.appenderRefs=rolling
 logger.audit.appenderRef.file.ref=RollingFile
 
 rootLogger.level=INFO
-rootLogger.appenderRefs=stdout
-rootLogger.appenderRef.stdout.ref=STDOUT
+#rootLogger.appenderRefs=stdout
+#rootLogger.appenderRef.stdout.ref=STDOUT





[34/50] [abbrv] hadoop git commit: HDDS-576. Move ContainerWithPipeline creation to RPC endpoint. Contributed by Nanda kumar.

2018-11-12 Thread botong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fe65d7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
--
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index aada723..30e3536 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -102,10 +103,10 @@ public class TestCloseContainerByPipeline {
         .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = omKeyLocationInfo.getContainerID();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getContainerManager().getContainer(ContainerID.valueof(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainerManager().getContainerWithPipeline(
-            ContainerID.valueof(containerID))
-        .getPipeline();
+        .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
@@ -158,10 +159,10 @@
         .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = omKeyLocationInfo.getContainerID();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getContainerManager().getContainer(ContainerID.valueof(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainerManager().getContainerWithPipeline(
-            ContainerID.valueof(containerID))
-        .getPipeline();
+        .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
@@ -216,10 +217,10 @@
         .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = omKeyLocationInfo.getContainerID();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getContainerManager().getContainer(ContainerID.valueof(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
-        .getContainerManager().getContainerWithPipeline(
-            ContainerID.valueof(containerID))
-        .getPipeline();
+        .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(3, datanodes.size());
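
In short, each test now performs two independent lookups where it previously
made one combined getContainerWithPipeline call. A condensed helper showing
the new pattern (the helper class and its throws clause are assumptions for
illustration; the accessors are exactly the ones used in the diff above):

import java.io.IOException;

import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

public final class PipelineLookup {
  /** Resolve a container's pipeline via the new two-step lookup. */
  static Pipeline lookupPipeline(StorageContainerManager scm, long containerID)
      throws IOException {
    ContainerInfo container = scm.getContainerManager()
        .getContainer(ContainerID.valueof(containerID));
    return scm.getPipelineManager().getPipeline(container.getPipelineID());
  }
}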
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18fe65d7/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
--
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index f3ce899..9cf51d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -80,28 +81,30 @@ public class TestCloseContainerHandler {
         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
   

[47/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

2018-11-12 Thread botong
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b97ad374/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
new file mode 100644
index 000..2ff879e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
@@ -0,0 +1,196 @@
+ {
+  "type": "capacityScheduler",
+  "capacity": 100.0,
+  "usedCapacity": 0.0,
+  "maxCapacity": 100.0,
+  "queueName": "root",
+  "queues": {
+"queue": [
+  {
+"type": "capacitySchedulerLeafQueueInfo",
+"capacity": 100.0,
+"usedCapacity": 0.0,
+"maxCapacity": 100.0,
+"absoluteCapacity": 100.0,
+"absoluteMaxCapacity": 100.0,
+"absoluteUsedCapacity": 0.0,
+"numApplications": 484,
+"queueName": "default",
+"state": "RUNNING",
+"resourcesUsed": {
+  "memory": 0,
+  "vCores": 0
+},
+"hideReservationQueues": false,
+"nodeLabels": [
+  "*"
+],
+"numActiveApplications": 484,
+"numPendingApplications": 0,
+"numContainers": 0,
+"maxApplications": 1,
+"maxApplicationsPerUser": 1,
+"userLimit": 100,
+"users": {
+  "user": [
+{
+  "username": "Default",
+  "resourcesUsed": {
+"memory": 0,
+"vCores": 0
+  },
+  "numPendingApplications": 0,
+  "numActiveApplications": 468,
+  "AMResourceUsed": {
+"memory": 30191616,
+"vCores": 468
+  },
+  "userResourceLimit": {
+"memory": 31490048,
+"vCores": 7612
+  }
+}
+  ]
+},
+"userLimitFactor": 1.0,
+"AMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"usedAMResource": {
+  "memory": 30388224,
+  "vCores": 532
+},
+"userAMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"preemptionDisabled": true
+  },
+  {
+"type": "capacitySchedulerLeafQueueInfo",
+"capacity": 100.0,
+"usedCapacity": 0.0,
+"maxCapacity": 100.0,
+"absoluteCapacity": 100.0,
+"absoluteMaxCapacity": 100.0,
+"absoluteUsedCapacity": 0.0,
+"numApplications": 484,
+"queueName": "default2",
+"state": "RUNNING",
+"resourcesUsed": {
+  "memory": 0,
+  "vCores": 0
+},
+"hideReservationQueues": false,
+"nodeLabels": [
+  "*"
+],
+"numActiveApplications": 484,
+"numPendingApplications": 0,
+"numContainers": 0,
+"maxApplications": 1,
+"maxApplicationsPerUser": 1,
+"userLimit": 100,
+"users": {
+  "user": [
+{
+  "username": "Default",
+  "resourcesUsed": {
+"memory": 0,
+"vCores": 0
+  },
+  "numPendingApplications": 0,
+  "numActiveApplications": 468,
+  "AMResourceUsed": {
+"memory": 30191616,
+"vCores": 468
+  },
+  "userResourceLimit": {
+"memory": 31490048,
+"vCores": 7612
+  }
+}
+  ]
+},
+"userLimitFactor": 1.0,
+"AMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"usedAMResource": {
+  "memory": 30388224,
+  "vCores": 532
+},
+"userAMResourceLimit": {
+  "memory": 31490048,
+  "vCores": 7612
+},
+"preemptionDisabled": true
+  }
+]
+  },
+  "health": {
+"lastrun": 1517951638085,
+"operationsInfo": {
+  "entry": {
+"key": 

[48/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

2018-11-12 Thread botong
YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b97ad374
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b97ad374
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b97ad374

Branch: refs/heads/YARN-7402
Commit: b97ad374035275d4c53dbba4f3cd2f51f6c6e7de
Parents: eac28b5
Author: Botong Huang 
Authored: Fri Mar 23 17:07:10 2018 -0700
Committer: Botong Huang 
Committed: Mon Nov 12 15:09:38 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  36 +-
 .../src/main/resources/yarn-default.xml |  40 +++
 .../utils/FederationStateStoreFacade.java   |  13 +
 .../pom.xml |  18 +
 .../globalpolicygenerator/GPGContext.java   |   4 +
 .../globalpolicygenerator/GPGContextImpl.java   |  10 +
 .../globalpolicygenerator/GPGPolicyFacade.java  | 220 
 .../server/globalpolicygenerator/GPGUtils.java  |  80 +
 .../GlobalPolicyGenerator.java  |  17 +
 .../policygenerator/GlobalPolicy.java   |  76 +
 .../policygenerator/NoOpGlobalPolicy.java   |  36 ++
 .../policygenerator/PolicyGenerator.java| 261 ++
 .../UniformWeightedLocalityGlobalPolicy.java|  71 
 .../policygenerator/package-info.java   |  24 ++
 .../TestGPGPolicyFacade.java| 202 +++
 .../policygenerator/TestPolicyGenerator.java| 338 +++
 .../src/test/resources/schedulerInfo1.json  | 134 
 .../src/test/resources/schedulerInfo2.json  | 196 +++
 18 files changed, 1775 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b97ad374/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 0b35b9f..c42ebb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3427,7 +3427,7 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
       false;
 
-  private static final String FEDERATION_GPG_PREFIX =
+  public static final String FEDERATION_GPG_PREFIX =
       FEDERATION_PREFIX + "gpg.";
 
   // The number of threads to use for the GPG scheduled executor service
@@ -3445,6 +3445,40 @@ public class YarnConfiguration extends Configuration {
       FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
   public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180;
 
+  public static final String FEDERATION_GPG_POLICY_PREFIX =
+      FEDERATION_GPG_PREFIX + "policy.generator.";
+
+  /** The interval at which the policy generator runs, default is one hour. */
+  public static final String GPG_POLICY_GENERATOR_INTERVAL_MS =
+      FEDERATION_GPG_POLICY_PREFIX + "interval-ms";
+  public static final long DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS = -1;
+
+  /**
+   * The configured policy generator class, runs NoOpGlobalPolicy by
+   * default.
+   */
+  public static final String GPG_GLOBAL_POLICY_CLASS =
+      FEDERATION_GPG_POLICY_PREFIX + "class";
+  public static final String DEFAULT_GPG_GLOBAL_POLICY_CLASS =
+      "org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator."
+      + "NoOpGlobalPolicy";
+
+  /**
+   * Whether or not the policy generator is running in read only (won't modify
+   * policies), default is false.
+   */
+  public static final String GPG_POLICY_GENERATOR_READONLY =
+      FEDERATION_GPG_POLICY_PREFIX + "readonly";
+  public static final boolean DEFAULT_GPG_POLICY_GENERATOR_READONLY =
+      false;
+
+  /**
+   * Which sub-clusters the policy generator should blacklist.
+   */
+  public static final String GPG_POLICY_GENERATOR_BLACKLIST =
+      FEDERATION_GPG_POLICY_PREFIX + "blacklist";
+
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////
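
For readers wiring this up, the new keys are read and written through
YarnConfiguration like any other federation setting. A minimal sketch (the
values are illustrative only; the constants are the ones added above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class GpgPolicyConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Example: run the generator hourly (the compiled default above is -1).
    conf.setLong(YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL_MS,
        60L * 60 * 1000);
    // Example: compute but not persist policies while validating a rollout.
    conf.setBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY, true);

    System.out.println("interval-ms = " + conf.getLong(
        YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL_MS,
        YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS));
  }
}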

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b97ad374/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
