[hadoop] branch ozone-0.4 updated: HDDS-1320. Update ozone to latest ratis snapshot build (0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.

2019-03-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 8f422d7  HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.
8f422d7 is described below

commit 8f422d7b5ec3bbc3b0fbf5ecde636a0c74b28ef0
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:24:02 2019 -0700

HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.

(cherry picked from commit 90afc9ab0382d45083dca1434f02936985798e48)
---
 .../container/common/transport/server/ratis/XceiverServerRatis.java | 3 ++-
 hadoop-hdds/pom.xml | 2 +-
 hadoop-ozone/pom.xml   | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index f1ace28..e70e012 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -494,7 +494,8 @@ public final class XceiverServerRatis extends XceiverServer {
       RaftClientRequest.Type type) {
     return new RaftClientRequest(clientId, server.getId(),
         RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
-        nextCallId(), 0, Message.valueOf(request.toByteString()), type);
+        nextCallId(), Message.valueOf(request.toByteString()), type,
+        null);
   }
 
   private void handlePipelineFailure(RaftGroupId groupId,
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 6f95547..61fe426 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -46,7 +46,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 
 
-    <ratis.version>0.4.0-5680cf5-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0-1fc5ace-SNAPSHOT</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index ae17655..09cede0 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -29,7 +29,7 @@
     <hadoop.version>3.2.0</hadoop.version>
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
     <ozone.version>0.4.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.4.0-5680cf5-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0-1fc5ace-SNAPSHOT</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Badlands</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>
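
The incompatible part of this upgrade is visible in the Java hunk above: the RaftClientRequest constructor in the new Ratis snapshot drops the long sequence-number argument (the literal 0 in the removed line) and takes an extra trailing parameter, which this code passes as null. A minimal sketch of keeping that call in one place so the next signature change is a one-line fix; RaftRequestFactory and newRequest are illustrative names, and only the constructor shape shown in the diff is assumed:

import org.apache.ratis.protocol.ClientId;
import org.apache.ratis.protocol.Message;
import org.apache.ratis.protocol.RaftClientRequest;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeerId;

/** Hypothetical helper: centralizes the RaftClientRequest constructor call
 *  so a future Ratis snapshot that changes the signature again (as
 *  0.4.0-1fc5ace-SNAPSHOT does here) is fixed in exactly one place. */
final class RaftRequestFactory {
  private RaftRequestFactory() {
  }

  /** Builds a request against the new constructor used in this patch:
   *  no sequence-number argument, trailing parameter passed as null. */
  static RaftClientRequest newRequest(ClientId clientId, RaftPeerId serverId,
      RaftGroupId groupId, long callId, Message message,
      RaftClientRequest.Type type) {
    return new RaftClientRequest(clientId, serverId, groupId, callId,
        message, type, null);
  }
}

Callers such as XceiverServerRatis could then delegate to newRequest(...) instead of invoking the constructor directly.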





[hadoop] branch trunk updated: HDDS-1320. Update ozone to latest ratis snapshot build (0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.

2019-03-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 90afc9a  HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.
90afc9a is described below

commit 90afc9ab0382d45083dca1434f02936985798e48
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:24:02 2019 -0700

HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.
---
 .../common/transport/server/ratis/XceiverServerRatis.java | 3 ++-
 hadoop-hdds/pom.xml   | 2 +-
 .../hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java   | 8 
 hadoop-ozone/pom.xml  | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index d0a56f9..8f09ff2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -495,7 +495,8 @@ public final class XceiverServerRatis extends XceiverServer {
       RaftClientRequest.Type type) {
     return new RaftClientRequest(clientId, server.getId(),
         RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
-        nextCallId(), 0, Message.valueOf(request.toByteString()), type);
+        nextCallId(), Message.valueOf(request.toByteString()), type,
+        null);
   }
 
   private void handlePipelineFailure(RaftGroupId groupId,
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 9ca65c0..32b2c03 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -46,7 +46,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hdds.version>0.5.0-SNAPSHOT</hdds.version>
 
 
-    <ratis.version>0.4.0-5680cf5-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0-1fc5ace-SNAPSHOT</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
index 4c2edfe..4406af6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
@@ -134,11 +134,11 @@ public class TestOzoneManagerStateMachine {
 
     RaftClientRequest raftClientRequest =
         new RaftClientRequest(ClientId.randomId(),
-            RaftPeerId.valueOf("random"), raftGroupId, 1, 1,
+            RaftPeerId.valueOf("random"), raftGroupId, 1,
             Message.valueOf(
                 OMRatisHelper.convertRequestToByteString(omRequest)),
             RaftClientRequest.Type.valueOf(
-                RaftProtos.WriteRequestTypeProto.getDefaultInstance()));
+                RaftProtos.WriteRequestTypeProto.getDefaultInstance()), null);
 
     TransactionContext transactionContext =
         ozoneManagerStateMachine.startTransaction(raftClientRequest);
@@ -232,11 +232,11 @@ public class TestOzoneManagerStateMachine {
 
     RaftClientRequest raftClientRequest =
         new RaftClientRequest(ClientId.randomId(),
-            RaftPeerId.valueOf("random"), raftGroupId, 1, 1,
+            RaftPeerId.valueOf("random"), raftGroupId, 1,
             Message.valueOf(
                 OMRatisHelper.convertRequestToByteString(omRequest)),
             RaftClientRequest.Type.valueOf(
-                RaftProtos.WriteRequestTypeProto.getDefaultInstance()));
+                RaftProtos.WriteRequestTypeProto.getDefaultInstance()), null);
 
     TransactionContext transactionContext =
         ozoneManagerStateMachine.startTransaction(raftClientRequest);
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 4dcb80e..b243ccc 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -29,7 +29,7 @@
     <hadoop.version>3.2.0</hadoop.version>
     <hdds.version>0.5.0-SNAPSHOT</hdds.version>
     <ozone.version>0.5.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.4.0-5680cf5-SNAPSHOT</ratis.version>
+    <ratis.version>0.4.0-1fc5ace-SNAPSHOT</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Crater Lake</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>





[hadoop] branch ozone-0.4 updated: HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded port numbers. Contributed by Arpit Agarwal. (#633)

2019-03-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 6c009a3  HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded 
port numbers. Contributed by Arpit Agarwal. (#633)
6c009a3 is described below

commit 6c009a3d044665ff3227f793004be2910f5d7eb1
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:18:58 2019 -0700

HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded port numbers. 
Contributed by Arpit Agarwal. (#633)

Change-Id: I9656af4a7f41812da9d125c10ae0e8daf3dcf7f5
(cherry picked from commit 2828f8c339d7a03bd2bedf99c7700a3bbeec3a34)
---
 .../java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java  | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
index fc85d8e..b071e27 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -94,8 +93,8 @@ public class TestOzoneManagerHttpServer {
 
   @Test public void testHttpPolicy() throws Exception {
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
+    conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0");
+    conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0");
 
     OzoneManagerHttpServer server = null;
     try {
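
The fix swaps a hard-coded SCM address and an unused InetSocketAddress for OM HTTP and HTTPS addresses bound to "localhost:0": port 0 tells the OS to pick any free ephemeral port, so concurrent test runs cannot collide. The same pattern in plain JDK terms, as a sketch independent of the Ozone test code:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

// Sketch of the ephemeral-port pattern the test now relies on:
// bind to port 0, then ask the socket which port the OS actually chose.
public class EphemeralPortExample {
  public static void main(String[] args) throws IOException {
    try (ServerSocket socket = new ServerSocket()) {
      socket.bind(new InetSocketAddress("localhost", 0)); // 0 = any free port
      System.out.println("bound to port " + socket.getLocalPort());
    }
  }
}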





[hadoop] branch trunk updated: HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded port numbers. Contributed by Arpit Agarwal. (#633)

2019-03-21 Thread ajay
This is an automated email from the ASF dual-hosted git repository.

ajay pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2828f8c  HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded 
port numbers. Contributed by Arpit Agarwal. (#633)
2828f8c is described below

commit 2828f8c339d7a03bd2bedf99c7700a3bbeec3a34
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:18:58 2019 -0700

HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded port numbers. 
Contributed by Arpit Agarwal. (#633)

Change-Id: I9656af4a7f41812da9d125c10ae0e8daf3dcf7f5
---
 .../java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java  | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
index fc85d8e..b071e27 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -94,8 +93,8 @@ public class TestOzoneManagerHttpServer {
 
   @Test public void testHttpPolicy() throws Exception {
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
+    conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0");
+    conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0");
 
     OzoneManagerHttpServer server = null;
     try {





[hadoop] branch trunk updated: YARN-9402. Opportunistic containers should not be scheduled on Decommissioning nodes. Contributed by Abhishek Modi.

2019-03-21 Thread gifuma
This is an automated email from the ASF dual-hosted git repository.

gifuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 548997d  YARN-9402. Opportunistic containers should not be scheduled 
on Decommissioning nodes. Contributed by Abhishek Modi.
548997d is described below

commit 548997d6c9c5a1b9734ee00d065ce48a189458e6
Author: Giovanni Matteo Fumarola 
AuthorDate: Thu Mar 21 12:04:05 2019 -0700

YARN-9402. Opportunistic containers should not be scheduled on 
Decommissioning nodes. Contributed by Abhishek Modi.
---
 .../distributed/NodeQueueLoadMonitor.java  | 11 +++--
 .../distributed/TestNodeQueueLoadMonitor.java  | 50 ++
 2 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
index ca35886..e093b2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -230,8 +231,9 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
     try {
       ClusterNode currentNode = this.clusterNodes.get(rmNode.getNodeID());
       if (currentNode == null) {
-        if (estimatedQueueWaitTime != -1
-            || comparator == LoadComparator.QUEUE_LENGTH) {
+        if (rmNode.getState() != NodeState.DECOMMISSIONING &&
+            (estimatedQueueWaitTime != -1 ||
+                comparator == LoadComparator.QUEUE_LENGTH)) {
           this.clusterNodes.put(rmNode.getNodeID(),
               new ClusterNode(rmNode.getNodeID())
                   .setQueueWaitTime(estimatedQueueWaitTime)
@@ -246,8 +248,9 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
               "wait queue length [" + waitQueueLength + "]");
         }
       } else {
-        if (estimatedQueueWaitTime != -1
-            || comparator == LoadComparator.QUEUE_LENGTH) {
+        if (rmNode.getState() != NodeState.DECOMMISSIONING &&
+            (estimatedQueueWaitTime != -1 ||
+                comparator == LoadComparator.QUEUE_LENGTH)) {
           currentNode
               .setQueueWaitTime(estimatedQueueWaitTime)
               .setQueueLength(waitQueueLength)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/TestNodeQueueLoadMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/TestNodeQueueLoadMonitor.java
index 85eddaa..bbc0086 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/TestNodeQueueLoadMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/TestNodeQueueLoadMonitor.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -99,6 +100,23 @@ public class TestNodeQueueLoadMonitor {
     Assert.assertEquals("h3:3", nodeIds.get(0).toString());
     Assert.assertEquals("h2:2", nodeIds.get(1).toString());
     Assert.assertEquals("h1:1", nodeIds.get(2).toString());
+
+    // Now update node 2 to DECOMMISSIONING state
+    selector
+        .updateNode(createRMNode("h2", 2, 1, 10, NodeState.DECOMMISSIONING));
+    selector.computeTask.run();
+    nodeIds = selector.selectNodes();
+    Assert.assertEquals(2, 
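
The guard added in both branches of updateNode() is the entire fix: a node enters or stays in the opportunistic-scheduling candidate pool only if it is not DECOMMISSIONING and a usable load signal exists for it. Restated as a self-contained predicate; the two enums below are simplified stand-ins for the YARN types, not the real classes:

// Simplified stand-ins for org.apache.hadoop.yarn.api.records.NodeState and
// NodeQueueLoadMonitor.LoadComparator, just to make the predicate runnable.
enum NodeState { RUNNING, DECOMMISSIONING, DECOMMISSIONED }
enum LoadComparator { QUEUE_LENGTH, QUEUE_WAIT_TIME }

final class OpportunisticNodeFilter {
  /** Mirrors the condition the patch adds in both branches: skip draining
   *  nodes, and otherwise require either a real queue-wait estimate or a
   *  comparator that only needs the queue length. */
  static boolean isSchedulable(NodeState state, int estimatedQueueWaitTime,
      LoadComparator comparator) {
    return state != NodeState.DECOMMISSIONING
        && (estimatedQueueWaitTime != -1
            || comparator == LoadComparator.QUEUE_LENGTH);
  }
}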

[hadoop] branch trunk updated: YARN-9267. General improvements in FpgaResourceHandlerImpl. Contributed by Peter Bacsko.

2019-03-21 Thread devaraj
This is an automated email from the ASF dual-hosted git repository.

devaraj pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a99eb80  YARN-9267. General improvements in FpgaResourceHandlerImpl. 
Contributed by Peter Bacsko.
a99eb80 is described below

commit a99eb80659835107f4015c859b3319bf3a70c281
Author: Devaraj K 
AuthorDate: Thu Mar 21 11:15:56 2019 -0700

YARN-9267. General improvements in FpgaResourceHandlerImpl. Contributed by 
Peter Bacsko.
---
 .../resources/fpga/FpgaResourceAllocator.java  |  28 +++--
 .../resources/fpga/FpgaResourceHandlerImpl.java|  59 ++
 .../resources/fpga/TestFpgaResourceHandler.java| 124 +++--
 3 files changed, 151 insertions(+), 60 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
index 334c6bd..e5622f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga.FpgaDiscoverer;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -143,6 +142,8 @@ public class FpgaResourceAllocator {
     private Integer minor;
     // IP file identifier. matrix multiplication for instance
     private String IPID;
+    // SHA-256 hash of the uploaded aocx file
+    private String aocxHash;
     // the device name under /dev
     private String devName;
     // the alias device name. Intel use acl number acl0 to acl31
@@ -168,6 +169,14 @@ public class FpgaResourceAllocator {
       return IPID;
     }
 
+    public String getAocxHash() {
+      return aocxHash;
+    }
+
+    public void setAocxHash(String hash) {
+      this.aocxHash = hash;
+    }
+
     public void setIPID(String IPID) {
       this.IPID = IPID;
     }
@@ -263,7 +272,8 @@ public class FpgaResourceAllocator {
     @Override
     public String toString() {
       return "FPGA Device:(Type: " + this.type + ", Major: " +
-          this.major + ", Minor: " + this.minor + ", IPID: " + this.IPID + ")";
+          this.major + ", Minor: " + this.minor + ", IPID: " +
+          this.IPID + ", Hash: " + this.aocxHash + ")";
     }
   }
 
@@ -279,11 +289,14 @@ public class FpgaResourceAllocator {
   }
 
   public synchronized void updateFpga(String requestor,
-      FpgaDevice device, String newIPID) {
+      FpgaDevice device, String newIPID, String newHash) {
     List<FpgaDevice> usedFpgas = usedFpgaByRequestor.get(requestor);
     int index = findMatchedFpga(usedFpgas, device);
     if (-1 != index) {
       usedFpgas.get(index).setIPID(newIPID);
+      FpgaDevice fpga = usedFpgas.get(index);
+      fpga.setIPID(newIPID);
+      fpga.setAocxHash(newHash);
     } else {
       LOG.warn("Failed to update FPGA due to unknown reason " +
           "that no record for this allocated device:" + device);
@@ -307,12 +320,12 @@ public class FpgaResourceAllocator {
    * @param type vendor plugin supported FPGA device type
    * @param count requested FPGA slot count
    * @param container container id
-   * @param IPIDPreference allocate slot with this IPID first
+   * @param ipidHash hash of the localized aocx file
    * @return Instance consists two List of allowed and denied {@link FpgaDevice}
    * @throws ResourceHandlerException When failed to allocate or write state store
    * */
   public synchronized FpgaAllocation assignFpga(String type, long count,
-      Container container, String IPIDPreference) throws ResourceHandlerException {
+      Container container, String ipidHash) throws ResourceHandlerException {
     List<FpgaDevice> currentAvailableFpga = availableFpga.get(type);
     String requestor = container.getContainerId().toString();
     if (null == currentAvailableFpga) {
@@ -327,8 +340,9 @@
       List<FpgaDevice> assignedFpgas = new LinkedList<>();
       int matchIPCount = 0;
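
The new aocxHash field stores a SHA-256 digest of the localized aocx bitstream, and the javadoc change suggests allocation now matches devices on that hash rather than on an IPID preference. How such a digest can be computed with the plain JDK, as a sketch separate from the plugin code:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public final class AocxHashExample {

  /** Streams the file through MessageDigest and returns lowercase hex. */
  static String sha256Hex(Path file)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest digest = MessageDigest.getInstance("SHA-256");
    try (InputStream in = Files.newInputStream(file)) {
      byte[] buf = new byte[8192];
      int n;
      while ((n = in.read(buf)) != -1) {
        digest.update(buf, 0, n);
      }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : digest.digest()) {
      hex.append(String.format("%02x", b)); // two's-complement byte to hex
    }
    return hex.toString();
  }

  public static void main(String[] args) throws Exception {
    System.out.println(sha256Hex(Paths.get(args[0])));
  }
}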
   

[hadoop] branch trunk updated: HADOOP-16058. S3A tests to include Terasort.

2019-03-21 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9f1c017  HADOOP-16058. S3A tests to include Terasort.
9f1c017 is described below

commit 9f1c017f444d5e57899493dc23207c6b5fc26dae
Author: Steve Loughran 
AuthorDate: Thu Mar 21 11:15:37 2019 +

HADOOP-16058. S3A tests to include Terasort.

Contributed by Steve Loughran.

This includes
 - HADOOP-15890. Some S3A committer tests don't match ITest* pattern; don't run in maven
 - MAPREDUCE-7090. BigMapOutput example doesn't work with paths off cluster fs
 - MAPREDUCE-7091. Terasort on S3A to switch to new committers
 - MAPREDUCE-7092. MR examples to work better against cloud stores
---
 .../org/apache/hadoop/mapred/BigMapOutput.java |  18 +-
 .../java/org/apache/hadoop/mapred/MRBench.java |   2 +-
 .../hadoop/examples/terasort/TeraOutputFormat.java |  11 -
 .../apache/hadoop/examples/terasort/TeraSort.java  |   2 +-
 .../hadoop/examples/terasort/TestTeraSort.java |   6 +-
 hadoop-tools/hadoop-aws/pom.xml |   4 +
 .../hadoop/fs/s3a/commit/AbstractCommitITest.java  |  64 ++
 .../fs/s3a/commit/AbstractITCommitMRJob.java   | 156 ++---
 .../fs/s3a/commit/AbstractYarnClusterITest.java | 256 +
 ...CommitMRJob.java => ITestMagicCommitMRJob.java} |  30 ++-
 ...itMRJob.java => ITestDirectoryCommitMRJob.java} |  30 ++-
 ...itMRJob.java => ITestPartitionCommitMRJob.java} |  31 ++-
 ...mmitMRJob.java => ITestStagingCommitMRJob.java} |  46 +++-
 ...st.java => ITestStagingCommitMRJobBadDest.java} |  29 ++-
 .../commit/terasort/AbstractCommitTerasortIT.java  | 241 +++
 .../ITestTerasortDirectoryCommitter.java}  |  37 ++-
 .../ITestTerasortMagicCommitter.java}  |  55 ++---
 17 files changed, 816 insertions(+), 202 deletions(-)

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
index 964673b..35992f5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
@@ -128,17 +128,20 @@ public class BigMapOutput extends Configured implements Tool {
         usage();
       }
     }
-
-    FileSystem fs = FileSystem.get(getConf());
+    if (bigMapInput == null || outputPath == null) {
+      // report usage and exit
+      usage();
+      // this stops IDES warning about unset local variables.
+      return -1;
+    }
+
     JobConf jobConf = new JobConf(getConf(), BigMapOutput.class);
 
     jobConf.setJobName("BigMapOutput");
     jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
     jobConf.setOutputFormat(SequenceFileOutputFormat.class);
     FileInputFormat.setInputPaths(jobConf, bigMapInput);
-    if (fs.exists(outputPath)) {
-      fs.delete(outputPath, true);
-    }
+    outputPath.getFileSystem(jobConf).delete(outputPath, true);
     FileOutputFormat.setOutputPath(jobConf, outputPath);
     jobConf.setMapperClass(IdentityMapper.class);
     jobConf.setReducerClass(IdentityReducer.class);
@@ -146,7 +149,10 @@ public class BigMapOutput extends Configured implements Tool {
     jobConf.setOutputValueClass(BytesWritable.class);
 
     if (createInput) {
-      createBigMapInputFile(jobConf, fs, bigMapInput, fileSizeInMB);
+      createBigMapInputFile(jobConf,
+          bigMapInput.getFileSystem(jobConf),
+          bigMapInput,
+          fileSizeInMB);
     }
 
     Date startTime = new Date();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
index 5328756..36f4693 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
@@ -284,7 +284,7 @@ public class MRBench extends Configured implements Tool{
     }
 
     JobConf jobConf = setupJob(numMaps, numReduces, jarFile);
-    FileSystem fs = FileSystem.get(jobConf);
+    FileSystem fs = BASE_DIR.getFileSystem(jobConf);
     Path inputFile = new Path(INPUT_DIR, "input_" + (new Random()).nextInt() + ".txt");