hadoop git commit: HDFS-13151. Fix the javadoc error in ReplicaInfo

2018-02-15 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 6f64530fc -> 1fb2f020e


HDFS-13151. Fix the javadoc error in ReplicaInfo

Signed-off-by: Akira Ajisaka 
(cherry picked from commit a1e05e02927f29e5598fdc665ac997667e6b00b1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fb2f020
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fb2f020
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fb2f020

Branch: refs/heads/branch-3.1
Commit: 1fb2f020e25dad01fc7372b5ab1a491dc662453f
Parents: 6f64530
Author: Bharat Viswanadham 
Authored: Fri Feb 16 16:22:24 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Feb 16 16:23:29 2018 +0900

--
 .../java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb2f020/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 3718799..4acf236 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -53,7 +53,6 @@ abstract public class ReplicaInfo extends Block
* Constructor.
* @param block a block
* @param vol volume where replica is located
-   * @param dir directory path where block and meta files are located
*/
   ReplicaInfo(Block block, FsVolumeSpi vol) {
 this(vol, block.getBlockId(), block.getNumBytes(),
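
For context, the "javadoc error" is the stale @param tag removed above: it names a
parameter (dir) that no longer exists on the constructor signature, and doclint
(enabled by default for the javadoc tool on JDK 8+) treats that as an error rather
than a warning. A minimal, hypothetical reproduction, not taken from the patch:

    /**
     * @param block a block
     * @param dir   there is no matching parameter, so doclint fails the javadoc
     *              build with an error along the lines of "@param name not found"
     */
    void example(Object block) {
    }

Deleting the orphaned tag, as this commit does, is the usual fix.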





hadoop git commit: HDFS-13151. Fix the javadoc error in ReplicaInfo

2018-02-15 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk aae629913 -> a1e05e029


HDFS-13151. Fix the javadoc error in ReplicaInfo

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1e05e02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1e05e02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1e05e02

Branch: refs/heads/trunk
Commit: a1e05e02927f29e5598fdc665ac997667e6b00b1
Parents: aae6299
Author: Bharat Viswanadham 
Authored: Fri Feb 16 16:22:24 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Feb 16 16:22:24 2018 +0900

--
 .../java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e05e02/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 3718799..4acf236 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -53,7 +53,6 @@ abstract public class ReplicaInfo extends Block
* Constructor.
* @param block a block
* @param vol volume where replica is located
-   * @param dir directory path where block and meta files are located
*/
   ReplicaInfo(Block block, FsVolumeSpi vol) {
 this(vol, block.getBlockId(), block.getNumBytes(),





[1/2] hadoop git commit: YARN-7292. Retrospect Resource Profile Behavior for overriding capability. Contributed by Wangda Tan.

2018-02-15 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8013475d4 -> aae629913


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae62991/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestProfileCapability.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestProfileCapability.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestProfileCapability.java
deleted file mode 100644
index cbad3f4..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestProfileCapability.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ProfileCapability;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Test profile capability behavior.
- */
-public class TestProfileCapability {
-  @Before
-  public void setup() {
-// Initialize resource map
-Map<String, ResourceInformation> riMap = new HashMap<>();
-
-// Initialize mandatory resources
-riMap.put(ResourceInformation.MEMORY_URI, ResourceInformation.MEMORY_MB);
-riMap.put(ResourceInformation.VCORES_URI, ResourceInformation.VCORES);
-
-for (int i = 0; i < 5; i++) {
-  String resourceName = "res-" + i;
-  riMap.put(resourceName, ResourceInformation
-  .newInstance(resourceName, "", 0, ResourceTypes.COUNTABLE, 0,
-  Integer.MAX_VALUE));
-}
-
-ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
-  }
-
-  @Test
-  public void testConvertProfileCapabilityToResource() {
-Resource profile1 = Resource.newInstance(1, 1);
-profile1.setResourceValue("res-0", 1);
-profile1.setResourceValue("res-1", 1);
-
-Resource profile2 = Resource.newInstance(2, 2);
-profile2.setResourceValue("res-0", 2);
-profile2.setResourceValue("res-1", 2);
-
-Resource profile3 = Resource.newInstance(3, 3);
-profile3.setResourceValue("res-0", 3);
-profile3.setResourceValue("res-1", 3);
-
-Map<String, Resource> profiles = ImmutableMap.of("profile1", profile1,
-"profile2", profile2, "profile3", profile3, "default", profile1);
-
-// Test case 1, set override value to (1, 1, 0), since we only allow
-// overwrite for positive value, it is still profile1.
-ProfileCapability pc = ProfileCapability.newInstance("profile1",
-Resource.newInstance(1, 1));
-Assert.assertEquals(profile1, ProfileCapability.toResource(pc, profiles));
-
-// Test case 2, similarly, negative value won't be respected.
-pc = ProfileCapability.newInstance("profile1",
-Resource.newInstance(1, -1));
-Assert.assertEquals(profile1, ProfileCapability.toResource(pc, profiles));
-
-// Test case 3, do overwrite for memory and vcores, the result is (3,3,1,1)
-Resource expected = Resource.newInstance(3, 3);
-expected.setResourceValue("res-0", 1);
-expected.setResourceValue("res-1", 1);
-pc = ProfileCapability.newInstance("profile1",
-Resource.newInstance(3, 3));
-Assert.assertEquals(expected, ProfileCapability.toResource(pc, profiles));
-
-// Test case 3, do overwrite for mem and res-1, the result is (3,1,3,1)
-expected = Resource.newInstance(3, 1);
-expected.setResourceValue("res-0", 3);
-expected.setResourceValue("res-1", 1);
-
-Resource overwrite = Resource.newInstance(3, 0);
-overwrite.setResourceValue("res-0", 3);
-overwrite.setResourceValue("res-1", 0);
-
-pc = ProfileCapability.newInstance("profile1", overwrite);
-Assert.assertEquals(expected, ProfileCapability.toResource(pc, profiles));
-
-// 

[2/2] hadoop git commit: YARN-7292. Retrospect Resource Profile Behavior for overriding capability. Contributed by Wangda Tan.

2018-02-15 Thread sunilg
YARN-7292. Retrospect Resource Profile Behavior for overriding capability. 
Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aae62991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aae62991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aae62991

Branch: refs/heads/trunk
Commit: aae629913cee0157c945a2c7384c7bf398f10616
Parents: 8013475
Author: Sunil G 
Authored: Fri Feb 16 12:37:40 2018 +0530
Committer: Sunil G 
Committed: Fri Feb 16 12:37:40 2018 +0530

--
 .../yarn/api/records/ProfileCapability.java | 173 ---
 .../yarn/api/records/ResourceRequest.java   |  43 +
 .../src/main/proto/yarn_protos.proto|   6 -
 .../distributedshell/ApplicationMaster.java |  22 +--
 .../applications/distributedshell/Client.java   |  37 ++--
 .../hadoop/yarn/client/api/AMRMClient.java  |  77 ++---
 .../yarn/client/api/impl/AMRMClientImpl.java| 126 +++---
 .../client/api/impl/RemoteRequestsTable.java| 100 ---
 .../yarn/client/api/impl/TestAMRMClient.java|  73 +++-
 .../impl/TestAMRMClientContainerRequest.java|   5 +-
 .../yarn/client/api/impl/TestNMClient.java  |  37 ++--
 ...TestOpportunisticContainerAllocationE2E.java |  25 ++-
 .../impl/pb/ProfileCapabilityPBImpl.java| 126 --
 .../records/impl/pb/ResourceRequestPBImpl.java  |  40 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |  10 --
 .../hadoop/yarn/api/TestProfileCapability.java  | 109 
 .../hadoop/yarn/server/utils/BuilderUtils.java  |   1 -
 .../server/resourcemanager/RMServerUtils.java   |  33 
 .../scheduler/SchedulerUtils.java   |  10 --
 .../TestApplicationMasterService.java   |   6 -
 .../resource/MockResourceProfileManager.java|  79 -
 ...CapacitySchedulerWithMultiResourceTypes.java | 110 
 22 files changed, 183 insertions(+), 1065 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae62991/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
deleted file mode 100644
index d6cb635..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api.records;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.util.Records;
-
-import java.util.Map;
-
-/**
- * Class to capture capability requirements when using resource profiles. The
- * ProfileCapability is meant to be used as part of the ResourceRequest. A
- * profile capability has two pieces - the resource profile name and the
- * overrides. The resource profile specifies the name of the resource profile
- * to be used and the capability override is the overrides desired on specific
- * resource types.
- *
- * For example, if you have a resource profile "small" that maps to
- * {@literal <4096M, 2 cores, 1 gpu>} and you set the capability override to
- * {@literal <8192M, 0 cores, 0 gpu>}, then the actual resource allocation on
- * the ResourceManager will be {@literal <8192M, 2 cores, 1 gpu>}.
- *
- * Note that the conversion from the ProfileCapability to the Resource class
- * with the actual resource requirements will be done by the ResourceManager,
- * which has the actual profile to Resource mapping.
- *
- */
-@InterfaceAudience.Public
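
The class javadoc above spells out the override rule that mattered here: only
positive values in the capability override replace the profile's values. A small,
self-contained sketch of that merge rule (a hypothetical helper written for this
digest, not code from the removed class):

    // Values > 0 in the override win; zero or negative entries keep the
    // profile's value, matching the <4096M, 2 cores, 1 gpu> example above.
    static long[] merge(long[] profile, long[] override) {
      long[] result = profile.clone();
      for (int i = 0; i < result.length; i++) {
        if (override[i] > 0) {
          result[i] = override[i];
        }
      }
      return result;
    }
    // merge(new long[]{4096, 2, 1}, new long[]{8192, 0, 0}) -> {8192, 2, 1}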

[03/50] [abbrv] hadoop git commit: HDFS-11701. NPE from Unresolved Host causes permanent DFSInputStream failures. Contributed by Lokesh Jain.

2018-02-15 Thread aengineer
HDFS-11701. NPE from Unresolved Host causes permanent DFSInputStream failures. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b061215e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b061215e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b061215e

Branch: refs/heads/HDFS-7240
Commit: b061215ecfebe476bf58f70788113d1af816f553
Parents: 456705a
Author: Jitendra Pandey 
Authored: Wed Feb 7 11:21:41 2018 -0800
Committer: Jitendra Pandey 
Committed: Wed Feb 7 11:22:36 2018 -0800

--
 .../org/apache/hadoop/hdfs/ClientContext.java   |  3 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  6 ++-
 .../hdfs/client/impl/BlockReaderFactory.java| 40 +++-
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  3 +-
 .../client/impl/TestBlockReaderFactory.java | 33 
 5 files changed, 64 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index a31945c..ad1b359 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -238,7 +239,7 @@ public class ClientContext {
 return byteArrayManager;
   }
 
-  public int getNetworkDistance(DatanodeInfo datanodeInfo) {
+  public int getNetworkDistance(DatanodeInfo datanodeInfo) throws IOException {
 // If applications disable the feature or the client machine can't
 // resolve its network location, clientNode will be set to null.
 if (clientNode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 32e5d0f..2edd755 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -550,7 +550,11 @@ public class DFSUtilClient {
  private static final Map<String, Boolean> localAddrMap = Collections
  .synchronizedMap(new HashMap<String, Boolean>());
 
-  public static boolean isLocalAddress(InetSocketAddress targetAddr) {
+  public static boolean isLocalAddress(InetSocketAddress targetAddr)
+  throws IOException {
+if (targetAddr.isUnresolved()) {
+  throw new IOException("Unresolved host: " + targetAddr);
+}
 InetAddress addr = targetAddr.getAddress();
 Boolean cached = localAddrMap.get(addr.getHostAddress());
 if (cached != null) {
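
The DFSUtilClient hunk above is the heart of the fix: previously an unresolved
address fell through to targetAddr.getAddress(), which returns null and caused the
NPE further down. A short sketch of the relevant JDK behaviour (illustration only,
not code from the patch):

    InetSocketAddress addr = new InetSocketAddress("no-such-host.invalid", 9866);
    // Name resolution is attempted in the constructor; when it fails:
    //   addr.isUnresolved() == true
    //   addr.getAddress()   == null   <-- the source of the original NPE
    // With this change, isLocalAddress() throws IOException("Unresolved host: ...")
    // instead of dereferencing the null InetAddress.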

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 60dde82..e83c8ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -357,28 +357,32 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   return reader;
 }
 final ShortCircuitConf scConf = conf.getShortCircuitConf();
-if (scConf.isShortCircuitLocalReads() && allowShortCircuitLocalReads) {
-  if (clientContext.getUseLegacyBlockReaderLocal()) {
-reader = getLegacyBlockReaderLocal();
-  

[23/50] [abbrv] hadoop git commit: HDFS-8693. Addendum patch to execute the command using UGI. Contributed by Brahma Reddy Battula.

2018-02-15 Thread aengineer
HDFS-8693. Addendum patch to execute the command using UGI. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35c17351
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35c17351
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35c17351

Branch: refs/heads/HDFS-7240
Commit: 35c17351cab645dcc72e0d2ae1608507aa787ffb
Parents: 3414fd1
Author: Brahma Reddy Battula 
Authored: Mon Feb 12 22:14:34 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Mon Feb 12 22:14:34 2018 +0530

--
 .../hdfs/server/datanode/BlockPoolManager.java   | 15 ++-
 1 file changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c17351/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index f6a11c2..141550e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -253,7 +253,20 @@ class BlockPoolManager {
   lifelineAddrs.add(nnIdToLifelineAddr != null ?
   nnIdToLifelineAddr.get(nnId) : null);
 }
-bpos.refreshNNList(addrs, lifelineAddrs);
+try {
+  UserGroupInformation.getLoginUser()
+  .doAs(new PrivilegedExceptionAction<Object>() {
+@Override
+public Object run() throws Exception {
+  bpos.refreshNNList(addrs, lifelineAddrs);
+  return null;
+}
+  });
+} catch (InterruptedException ex) {
+  IOException ioe = new IOException();
+  ioe.initCause(ex.getCause());
+  throw ioe;
+}
   }
 }
   }





[15/50] [abbrv] hadoop git commit: YARN-7838. Support AND/OR constraints in Distributed Shell. Contributed by Weiwei Yang.

2018-02-15 Thread aengineer
YARN-7838. Support AND/OR constraints in Distributed Shell. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a08c0488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a08c0488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a08c0488

Branch: refs/heads/HDFS-7240
Commit: a08c048832d68c203fbdfce8d9f0e7dcccb02a55
Parents: 25fbec6
Author: Weiwei Yang 
Authored: Sun Feb 11 14:20:46 2018 +0800
Committer: Weiwei Yang 
Committed: Sun Feb 11 14:20:46 2018 +0800

--
 .../PlacementConstraintParseException.java  |  28 +
 .../constraint/PlacementConstraintParser.java   | 615 +++
 .../yarn/util/constraint/package-info.java  |  22 +
 .../resource/TestPlacementConstraintParser.java | 372 +++
 .../distributedshell/ApplicationMaster.java |  10 +-
 .../applications/distributedshell/Client.java   |   8 +-
 .../distributedshell/PlacementSpec.java |  86 +--
 7 files changed, 1075 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
new file mode 100644
index 000..8f3e28c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util.constraint;
+
+/**
+ * Exception when the placement constraint parser fails to parse an expression.
+ */
+public class PlacementConstraintParseException extends Exception {
+
+  public PlacementConstraintParseException(String msg) {
+super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
new file mode 100644
index 000..603e692
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
@@ -0,0 +1,615 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util.constraint;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import 

[42/50] [abbrv] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

2018-02-15 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
deleted file mode 100644
index 7c8760b..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.file.AccessDeniedException;
-import java.util.concurrent.Callable;
-
-import com.amazonaws.auth.AWSCredentials;
-import 
com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-
-/**
- * Tests use of assumed roles.
- * Only run if an assumed role is provided.
- */
-public class ITestAssumeRole extends AbstractS3ATestBase {
-
-  private static final Logger LOG =
-  LoggerFactory.getLogger(ITestAssumeRole.class);
-
-  private static final String ARN_EXAMPLE
-  = "arn:aws:kms:eu-west-1:000:key/" +
-  "000-16c9-4832-a1a9-c8bbef25ec8b";
-
-  private static final String E_BAD_ROLE
-  = "Not authorized to perform sts:AssumeRole";
-
-  /**
-   * This is AWS policy removes read access.
-   */
-  public static final String RESTRICTED_POLICY = "{\n"
-  + "   \"Version\": \"2012-10-17\",\n"
-  + "   \"Statement\": [{\n"
-  + "  \"Effect\": \"Deny\",\n"
-  + "  \"Action\": \"s3:ListObjects\",\n"
-  + "  \"Resource\": \"*\"\n"
-  + "}\n"
-  + "   ]\n"
-  + "}";
-
-  private void assumeRoleTests() {
-assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
-  }
-
-  private String getAssumedRoleARN() {
-return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
-  }
-
-  /**
-   * Expect a filesystem to fail to instantiate.
-   * @param conf config to use
-   * @param clazz class of exception to expect
-   * @param text text in exception
-   * @param <E> type of exception as inferred from clazz
-   * @throws Exception if the exception was the wrong class
-   */
-  private <E extends Throwable> void expectFileSystemFailure(
-  Configuration conf,
-  Class<E> clazz,
-  String text) throws Exception {
-interceptC(clazz,
-text,
-() -> new Path(getFileSystem().getUri()).getFileSystem(conf));
-  }
-
-  /**
-   * Experimental variant of intercept() which closes any Closeable
-   * returned.
-   */
-  private static <E extends Throwable> E interceptC(
-  Class<E> clazz, String text,
-  Callable<Closeable> eval)
-  throws Exception {
-
-return intercept(clazz, text,
-() -> {
-  try (Closeable c = eval.call()) {
-return c.toString();
-  }
-});
-  }
-
-  @Test
-  public void testCreateCredentialProvider() throws IOException {
-assumeRoleTests();
-describe("Create the credential provider");
-
-String roleARN = getAssumedRoleARN();
-
-Configuration conf = new Configuration(getContract().getConf());
-conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
-conf.set(ASSUMED_ROLE_ARN, roleARN);
-conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
-conf.set(ASSUMED_ROLE_SESSION_DURATION, "45m");
-conf.set(ASSUMED_ROLE_POLICY, RESTRICTED_POLICY);
-try (AssumedRoleCredentialProvider provider
- = new AssumedRoleCredentialProvider(conf)) {
-  LOG.info("Provider is {}", provider);
-  AWSCredentials credentials = provider.getCredentials();
-  assertNotNull("Null credentials from " + provider, credentials);
-}
-  }
-
-  @Test
-  public void testAssumeRoleCreateFS() throws 
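
For anyone who wants to try the feature these tests exercise, the Constants
referenced above correspond to s3a configuration keys. A hedged sketch -- the key
names and provider package are quoted from memory and the ARN is invented, so
treat all of them as assumptions rather than documentation:

    Configuration conf = new Configuration();
    // Select the assumed-role credential provider.
    conf.set("fs.s3a.aws.credentials.provider",
        "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider");
    // Role to assume (hypothetical ARN) and session settings.
    conf.set("fs.s3a.assumed.role.arn",
        "arn:aws:iam::111122223333:role/example-role");
    conf.set("fs.s3a.assumed.role.session.name", "example-session");
    conf.set("fs.s3a.assumed.role.session.duration", "45m");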

[19/50] [abbrv] hadoop git commit: YARN-7739. DefaultAMSProcessor should properly check customized resource types against minimum/maximum allocation. (wangda)

2018-02-15 Thread aengineer
YARN-7739. DefaultAMSProcessor should properly check customized resource types 
against minimum/maximum allocation. (wangda)

Change-Id: I10cc9341237d9a2fc0f8c855efb98a36b91389e2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d02e42ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d02e42ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d02e42ce

Branch: refs/heads/HDFS-7240
Commit: d02e42cee4a08a47ed2835f7a4a100daaa95833f
Parents: d4c9857
Author: Wangda Tan 
Authored: Mon Feb 12 10:29:37 2018 +0800
Committer: Wangda Tan 
Committed: Mon Feb 12 10:29:37 2018 +0800

--
 .../scheduler/SchedulerUtils.java   |  36 ++--
 .../TestApplicationMasterService.java   | 190 +++
 .../scheduler/capacity/TestUtils.java   |   8 +-
 3 files changed, 214 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d02e42ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 32f5824..0080a29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
@@ -51,6 +52,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
@@ -276,23 +278,23 @@ public class SchedulerUtils {
   throw new InvalidResourceRequestException(ye);
 }
 
-if (resReq.getCapability().getMemorySize() < 0 ||
-resReq.getCapability().getMemorySize() > 
maximumResource.getMemorySize()) {
-  throw new InvalidResourceRequestException("Invalid resource request"
-  + ", requested memory < 0"
-  + ", or requested memory > max configured"
-  + ", requestedMemory=" + resReq.getCapability().getMemorySize()
-  + ", maxMemory=" + maximumResource.getMemorySize());
-}
-if (resReq.getCapability().getVirtualCores() < 0 ||
-resReq.getCapability().getVirtualCores() >
-maximumResource.getVirtualCores()) {
-  throw new InvalidResourceRequestException("Invalid resource request"
-  + ", requested virtual cores < 0"
-  + ", or requested virtual cores > max configured"
-  + ", requestedVirtualCores="
-  + resReq.getCapability().getVirtualCores()
-  + ", maxVirtualCores=" + maximumResource.getVirtualCores());
+Resource requestedResource = resReq.getCapability();
+for (int i = 0; i < ResourceUtils.getNumberOfKnownResourceTypes(); i++) {
+  ResourceInformation reqRI = requestedResource.getResourceInformation(i);
+  ResourceInformation maxRI = maximumResource.getResourceInformation(i);
+  if (reqRI.getValue() < 0 || reqRI.getValue() > maxRI.getValue()) {
+throw new InvalidResourceRequestException(
+"Invalid resource request, requested resource type=[" + reqRI
+.getName()
++ "] < 0 or greater than maximum allowed allocation. Requested 
"
++ "resource=" + requestedResource
++ ", maximum allowed allocation=" + maximumResource
++ ", please note that maximum allowed allocation is calculated 
"
+ 

[14/50] [abbrv] hadoop git commit: HDFS-13130. Log object instance get incorrectly in SlowDiskTracker. Contributed by Jianfei Jiang.

2018-02-15 Thread aengineer
HDFS-13130. Log object instance get incorrectly in SlowDiskTracker. Contributed 
by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25fbec67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25fbec67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25fbec67

Branch: refs/heads/HDFS-7240
Commit: 25fbec67d1c01cc3531b51d9e2ec03e5c3591a7e
Parents: 60f9e60
Author: Yiqun Lin 
Authored: Sun Feb 11 12:02:10 2018 +0800
Committer: Yiqun Lin 
Committed: Sun Feb 11 12:02:10 2018 +0800

--
 .../apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25fbec67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
index 051121e..d0d1ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
@@ -53,7 +53,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 @InterfaceStability.Unstable
 public class SlowDiskTracker {
   public static final Logger LOG =
-  LoggerFactory.getLogger(SlowPeerTracker.class);
+  LoggerFactory.getLogger(SlowDiskTracker.class);
 
   /**
* Time duration after which a report is considered stale. This is
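
This is the classic copy-paste logger bug: logging still worked, but every event
from SlowDiskTracker was attributed to SlowPeerTracker, which matters for
per-logger level configuration and for anyone grepping by logger name. In
isolation:

    // Before: events carry the wrong logger name (copy-pasted from SlowPeerTracker).
    public static final Logger LOG = LoggerFactory.getLogger(SlowPeerTracker.class);
    // After: the logger name matches the enclosing class.
    public static final Logger LOG = LoggerFactory.getLogger(SlowDiskTracker.class);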





[13/50] [abbrv] hadoop git commit: Preparing for 3.2.0 development

2018-02-15 Thread aengineer
Preparing for 3.2.0 development

Change-Id: I6d0e01f3d665d26573ef2b957add1cf0cddf7938


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60f9e60b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60f9e60b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60f9e60b

Branch: refs/heads/HDFS-7240
Commit: 60f9e60b3b417c800683c87669b6f5410ac65066
Parents: c97d5bc
Author: Wangda Tan 
Authored: Sun Feb 11 11:17:38 2018 +0800
Committer: Wangda Tan 
Committed: Sun Feb 11 11:17:38 2018 +0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-uploader/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-fs2img/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml

[01/50] [abbrv] hadoop git commit: HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.

2018-02-15 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 fc84744f7 -> a2ffd9cea


HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one 
namenode is up. Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01bd6ab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01bd6ab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01bd6ab1

Branch: refs/heads/HDFS-7240
Commit: 01bd6ab18fa48f4c7cac1497905b52e547962599
Parents: 266da25
Author: Brahma Reddy Battula 
Authored: Wed Feb 7 23:10:33 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Feb 7 23:10:33 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/HAUtil.java |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 +---
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 ++-
 4 files changed, 602 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 3556086..1d294be 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import 
org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -325,6 +326,7 @@ public class HAUtil {
*/
  public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
   throws IOException {
+List<IOException> exceptions = new ArrayList<>();
 for (ClientProtocol namenode : namenodes) {
   try {
 namenode.getFileInfo("/");
@@ -334,10 +336,15 @@ public class HAUtil {
 if (cause instanceof StandbyException) {
   // This is expected to happen for a standby NN.
 } else {
-  throw re;
+  exceptions.add(re);
 }
+  } catch (IOException ioe) {
+exceptions.add(ioe);
   }
 }
+if(!exceptions.isEmpty()){
+  throw MultipleIOException.createIOException(exceptions);
+}
 return false;
   }
 }
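
The behavioural change in the hunk above: instead of rethrowing the first
non-standby failure, exceptions from all namenodes are collected and surfaced
together as a MultipleIOException, so DFSAdmin no longer reports an ambiguous
result when only one namenode is reachable. A hedged sketch of what a caller now
observes (illustrative only, not code from the patch):

    try {
      // namenodes: ClientProtocol proxies for every configured namenode
      boolean anyActive = HAUtil.isAtLeastOneActive(namenodes);
    } catch (IOException e) {
      // With the patch, a combined MultipleIOException is thrown when no
      // namenode could be reached, rather than the probe aborting on (or
      // swallowing) the first per-namenode error.
      System.err.println("Could not determine an active namenode: " + e);
    }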

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ece649d..0c9b875 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4437,7 +4437,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
   void setBalancerBandwidth(long bandwidth) throws IOException {
 String operationName = "setBalancerBandwidth";
-checkOperation(OperationCategory.UNCHECKED);
+checkOperation(OperationCategory.WRITE);
 checkSuperuserPrivilege(operationName);
 getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
 logAuditEvent(true, operationName, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1bedd82..023fea9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import 

[35/50] [abbrv] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

2018-02-15 Thread aengineer
HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions.
Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f20dc0d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f20dc0d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f20dc0d5

Branch: refs/heads/HDFS-7240
Commit: f20dc0d5770d3876954faf0a6e8dcce6539ffc23
Parents: 042ef2f
Author: Steve Loughran 
Authored: Wed Feb 14 16:20:14 2018 +
Committer: Steve Loughran 
Committed: Wed Feb 14 16:20:14 2018 +

--
 .../org/apache/hadoop/fs/LocalFileSystem.java   |   2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  30 +-
 .../apache/hadoop/ha/FailoverController.java|  20 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |   9 +-
 .../org/apache/hadoop/io/retry/RetryUtils.java  |  11 +-
 .../main/java/org/apache/hadoop/net/DNS.java|  39 +-
 .../apache/hadoop/service/AbstractService.java  |  27 +-
 .../hadoop/service/ServiceOperations.java   |   6 +-
 .../hadoop/service/TestServiceOperations.java   |   3 +-
 .../hadoop/hdfs/nfs/nfs3/DFSClientCache.java|  25 +-
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   | 314 +++-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 370 +--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 211 +--
 .../hdfs/server/datanode/DataXceiver.java   | 172 -
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  34 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   2 +-
 .../hadoop/test/MiniDFSClusterManager.java  |  26 +-
 .../apache/hadoop/mapred/gridmix/Gridmix.java   |  22 +-
 .../swift/http/HttpInputStreamWithRelease.java  |  29 +-
 20 files changed, 587 insertions(+), 767 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index 91b2315..538ccdf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -139,7 +139,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
   LOG.warn("Ignoring failure of renameTo");
 }
 } catch (IOException e) {
-  LOG.warn("Error moving bad file " + p + ": " + e);
+  LOG.warn("Error moving bad file " + p, e);
 }
 return false;
   }
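
The pattern applied throughout this patch is worth spelling out: pass the
Throwable as the final argument so the stack trace is logged, and prefer SLF4J's
{} placeholders so message strings are only built when the level is enabled. In
isolation:

    // Loses the stack trace; only e.toString() is appended to the message.
    LOG.warn("Error moving bad file " + p + ": " + e);
    // Keeps the full stack trace by passing the exception as the Throwable argument.
    LOG.warn("Error moving bad file " + p, e);
    // Placeholder form: no string concatenation unless DEBUG is enabled, which
    // also makes the explicit isDebugEnabled() guard unnecessary.
    LOG.debug("Becoming active for {}", this);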

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index 93fd2cf..a23fb71 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -888,9 +888,8 @@ public class ActiveStandbyElector implements StatCallback, 
StringCallback {
   Stat oldBreadcrumbStat = fenceOldActive();
   writeBreadCrumbNode(oldBreadcrumbStat);
 
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Becoming active for " + this);
-  }
+  LOG.debug("Becoming active for {}", this);
+
   appClient.becomeActive();
   state = State.ACTIVE;
   return true;
@@ -910,8 +909,8 @@ public class ActiveStandbyElector implements StatCallback, 
StringCallback {
   throws KeeperException, InterruptedException {
 Preconditions.checkState(appData != null, "no appdata");
 
-LOG.info("Writing znode " + zkBreadCrumbPath +
-" to indicate that the local node is the most recent active...");
+LOG.info("Writing znode {} to indicate that the local " +
+"node is the most recent active...", zkBreadCrumbPath);
 if (oldBreadcrumbStat == null) {
   // No previous active, just create the node
   createWithRetries(zkBreadCrumbPath, appData, zkAcl,
@@ -948,9 +947,8 @@ public class ActiveStandbyElector implements StatCallback, 
StringCallback {
   
   deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
 } catch (Exception e) {
-  LOG.warn("Unable to delete our own bread-crumb of being active 

[09/50] [abbrv] hadoop git commit: YARN-7827. Stop and Delete Yarn Service from RM UI fails with HTTP ERROR 404. Contributed by Sunil G

2018-02-15 Thread aengineer
YARN-7827. Stop and Delete Yarn Service from RM UI fails with HTTP ERROR 404. 
Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddec08d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddec08d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddec08d7

Branch: refs/heads/HDFS-7240
Commit: ddec08d7ccc8e43492fca2784203bd8af5e968cc
Parents: 1bc03dd
Author: Jian He 
Authored: Thu Feb 8 21:32:02 2018 -0800
Committer: Jian He 
Committed: Thu Feb 8 21:32:40 2018 -0800

--
 .../src/main/webapp/app/adapters/yarn-servicedef.js |  9 ++---
 .../src/main/webapp/app/components/deploy-service.js| 12 +---
 .../src/main/webapp/app/controllers/yarn-app.js |  4 ++--
 .../src/main/webapp/app/controllers/yarn-app/info.js|  4 ++--
 .../main/webapp/app/controllers/yarn-deploy-service.js  | 12 ++--
 .../webapp/app/templates/components/deploy-service.hbs  | 10 ++
 .../src/main/webapp/app/templates/yarn-app.hbs  |  4 ++--
 .../src/main/webapp/app/templates/yarn-app/info.hbs |  4 ++--
 .../src/main/webapp/app/utils/info-seeder.js|  3 ++-
 .../src/main/webapp/config/default-config.js|  2 +-
 10 files changed, 42 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
index 3fb4a81..03685fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
@@ -24,21 +24,24 @@ export default RESTAbstractAdapter.extend({
   restNameSpace: "dashService",
   serverName: "DASH",
 
-  deployService(request) {
+  deployService(request, user) {
 var url = this.buildURL();
+url += "/?user.name=" + user;
 return this.ajax(url, "POST", {data: request});
   },
 
-  stopService(serviceName) {
+  stopService(serviceName, user) {
 var url = this.buildURL();
 url += "/" + serviceName;
+url += "/?user.name=" + user;
 var data = {"state": "STOPPED", "name": serviceName};
 return this.ajax(url, "PUT", {data: data});
   },
 
-  deleteService(serviceName) {
+  deleteService(serviceName, user) {
 var url = this.buildURL();
 url += "/" + serviceName;
+url += "/?user.name=" + user;
 return this.ajax(url, "DELETE", {data: {}});
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
index 90e10e5..36895d7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
@@ -27,6 +27,7 @@ export default Ember.Component.extend({
   customServiceDef: '',
   serviceResp: null,
   isLoading: false,
+  userName: '',
 
   actions: {
 showSaveTemplateModal() {
@@ -36,11 +37,11 @@ export default Ember.Component.extend({
 deployService() {
   this.set('serviceResp', null);
   if (this.get('isStandardViewType')) {
-this.sendAction("deployServiceDef", this.get('serviceDef'));
+this.sendAction("deployServiceDef", this.get('serviceDef'), 
this.get('userName'));
   } else {
 try {
   var parsed = JSON.parse(this.get('customServiceDef'));
-  this.sendAction("deployServiceJson", parsed);
+  this.sendAction("deployServiceJson", parsed, this.get('userName'));
 } catch (err) {
   this.set('serviceResp', {type: 'error', message: 'Invalid JSON: ' + 
err.message});
   throw err;
@@ -148,16 +149,21 @@ export default Ember.Component.extend({
 
   isValidTemplateName: Ember.computed.notEmpty('savedTemplateName'),
 
+  isUserNameGiven: Ember.computed.empty('userName'),
+
   isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', 
'serviceDef.serviceComponents.[]', function () {
 return this.get('serviceDef').isValidServiceDef();
   }),
 
   isValidCustomServiceDef: 

[24/50] [abbrv] hadoop git commit: MAPREDUCE-7048. Uber AM can crash due to unknown task in statusUpdate. Contributed by Peter Bacsko

2018-02-15 Thread aengineer
MAPREDUCE-7048. Uber AM can crash due to unknown task in statusUpdate. 
Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87e2570a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87e2570a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87e2570a

Branch: refs/heads/HDFS-7240
Commit: 87e2570a1419d3616de2de3b553108ad1a8af425
Parents: 35c1735
Author: Jason Lowe 
Authored: Mon Feb 12 13:21:09 2018 -0600
Committer: Jason Lowe 
Committed: Mon Feb 12 13:21:09 2018 -0600

--
 .../java/org/apache/hadoop/mapred/Task.java | 16 ++--
 .../java/org/apache/hadoop/mapred/TestTask.java | 89 
 2 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2570a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 5b98b35..d83a6b0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -200,6 +200,7 @@ abstract public class Task implements Writable, 
Configurable {
   protected SecretKey shuffleSecret;
   protected GcTimeUpdater gcUpdater;
   final AtomicBoolean mustPreempt = new AtomicBoolean(false);
+  private boolean uberized = false;
 
   
   // Constructors
@@ -855,9 +856,6 @@ abstract public class Task implements Writable, 
Configurable {
   long taskProgressInterval = MRJobConfUtil.
   getTaskProgressReportInterval(conf);
 
-  boolean uberized = conf.getBoolean("mapreduce.task.uberized",
-  false);
-
   while (!taskDone.get()) {
 synchronized (lock) {
   done = false;
@@ -1301,11 +1299,17 @@ abstract public class Task implements Writable, 
Configurable {
   public void statusUpdate(TaskUmbilicalProtocol umbilical) 
   throws IOException {
 int retries = MAX_RETRIES;
+
 while (true) {
   try {
 if (!umbilical.statusUpdate(getTaskID(), taskStatus).getTaskFound()) {
-  LOG.warn("Parent died.  Exiting "+taskId);
-  System.exit(66);
+  if (uberized) {
+LOG.warn("Task no longer available: " + taskId);
+break;
+  } else {
+LOG.warn("Parent died.  Exiting " + taskId);
+ExitUtil.terminate(66);
+  }
 }
 taskStatus.clearStatus();
 return;
@@ -1518,6 +1522,8 @@ abstract public class Task implements Writable, 
Configurable {
 NetUtils.addStaticResolution(name, resolvedName);
   }
 }
+
+uberized = conf.getBoolean("mapreduce.task.uberized", false);
   }
 
   public Configuration getConf() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2570a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
new file mode 100644
index 000..500229c
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * 

[17/50] [abbrv] hadoop git commit: YARN-5848. Remove unnecessary public/crossdomain.xml from YARN UIv2 sub project. (Sunil G via wangda)

2018-02-15 Thread aengineer
YARN-5848. Remove unnecessary public/crossdomain.xml from YARN UIv2 sub 
project. (Sunil G via wangda)

Change-Id: Ie295f88232192e6b520c335b0332383cc6a232c0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/789a185c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/789a185c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/789a185c

Branch: refs/heads/HDFS-7240
Commit: 789a185c16351d2343e075413a50eb3e5849cc5f
Parents: e795833
Author: Wangda Tan 
Authored: Mon Feb 12 10:27:15 2018 +0800
Committer: Wangda Tan 
Committed: Mon Feb 12 10:27:15 2018 +0800

--
 .../hadoop-yarn-ui/public/crossdomain.xml| 15 ---
 1 file changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/789a185c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml
deleted file mode 100644
index 0c16a7a..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-
-http://www.adobe.com/xml/dtds/cross-domain-policy.dtd;>
-
-  
-
-  
-  
-
-  
-  
-





[36/50] [abbrv] hadoop git commit: Revert "HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar."

2018-02-15 Thread aengineer
Revert "HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO 
failures. Contributed by Ajay Kumar."

This reverts commit 332269de065d0f40eb54ee5e53b765217c24081e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f20f432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f20f432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f20f432

Branch: refs/heads/HDFS-7240
Commit: 1f20f432d2472f92797ea01711ca4cc97e7b2b23
Parents: f20dc0d
Author: Xiao Chen 
Authored: Wed Feb 14 10:22:37 2018 -0800
Committer: Xiao Chen 
Committed: Wed Feb 14 10:25:05 2018 -0800

--
 .../client/KerberosAuthenticator.java   | 80 +++-
 .../client/TestKerberosAuthenticator.java   | 29 ---
 2 files changed, 27 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f20f432/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 64d4330..942d13c 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -13,8 +13,6 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
-import com.google.common.annotations.VisibleForTesting;
-import java.lang.reflect.Constructor;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
@@ -179,62 +177,38 @@ public class KerberosAuthenticator implements 
Authenticator {
*/
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
-  throws IOException, AuthenticationException {
+throws IOException, AuthenticationException {
 if (!token.isSet()) {
   this.url = url;
   base64 = new Base64(0);
-  try {
-HttpURLConnection conn = token.openConnection(url, connConfigurator);
-conn.setRequestMethod(AUTH_HTTP_METHOD);
-conn.connect();
-
-boolean needFallback = false;
-if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-  LOG.debug("JDK performed authentication on our behalf.");
-  // If the JDK already did the SPNEGO back-and-forth for
-  // us, just pull out the token.
-  AuthenticatedURL.extractToken(conn, token);
-  if (isTokenKerberos(token)) {
-return;
-  }
-  needFallback = true;
+  HttpURLConnection conn = token.openConnection(url, connConfigurator);
+  conn.setRequestMethod(AUTH_HTTP_METHOD);
+  conn.connect();
+  
+  boolean needFallback = false;
+  if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+LOG.debug("JDK performed authentication on our behalf.");
+// If the JDK already did the SPNEGO back-and-forth for
+// us, just pull out the token.
+AuthenticatedURL.extractToken(conn, token);
+if (isTokenKerberos(token)) {
+  return;
 }
-if (!needFallback && isNegotiate(conn)) {
-  LOG.debug("Performing our own SPNEGO sequence.");
-  doSpnegoSequence(token);
-} else {
-  LOG.debug("Using fallback authenticator sequence.");
-  Authenticator auth = getFallBackAuthenticator();
-  // Make sure that the fall back authenticator have the same
-  // ConnectionConfigurator, since the method might be overridden.
-  // Otherwise the fall back authenticator might not have the
-  // information to make the connection (e.g., SSL certificates)
-  auth.setConnectionConfigurator(connConfigurator);
-  auth.authenticate(url, token);
-}
-  } catch (IOException ex){
-throw wrapExceptionWithMessage(ex,
-"Error while authenticating with endpoint: " + url);
-  } catch (AuthenticationException ex){
-throw wrapExceptionWithMessage(ex,
-"Error while authenticating with endpoint: " + url);
+needFallback = true;
+  }
+  if (!needFallback && isNegotiate(conn)) {
+LOG.debug("Performing our own SPNEGO sequence.");
+doSpnegoSequence(token);
+  } else {
+LOG.debug("Using fallback authenticator sequence.");
+  

[05/50] [abbrv] hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-15 Thread aengineer
HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8faf0b50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8faf0b50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8faf0b50

Branch: refs/heads/HDFS-7240
Commit: 8faf0b50d435039f69ea35f592856ca04d378809
Parents: f491f71
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 08:59:48 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8faf0b50/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 6a41cd8..4cc5389 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -253,7 +253,9 @@ class FSDirConcatOp {
 for (INodeFile nodeToRemove : srcList) {
   if(nodeToRemove != null) {
 nodeToRemove.clearBlocks();
-nodeToRemove.getParent().removeChild(nodeToRemove);
+// Ensure the nodeToRemove is cleared from snapshot diff list
+nodeToRemove.getParent().removeChild(nodeToRemove,
+targetIIP.getLatestSnapshotId());
 fsd.getINodeMap().remove(nodeToRemove);
 count++;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8faf0b50/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+Path[] files = new Path[3];
+for (int i = 0; i < 3; i++) {
+  files[i] = new Path(st, i+ ".txt");
+}
+
+Path dest = new Path(st, "dest.txt");
+  

[04/50] [abbrv] hadoop git commit: HDFS-13115. In getNumUnderConstructionBlocks(), ignore the inodeIds for which the inodes have been deleted. Contributed by Yongjun Zhang.

2018-02-15 Thread aengineer
HDFS-13115. In getNumUnderConstructionBlocks(), ignore the inodeIds for which 
the inodes have been deleted. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f491f717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f491f717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f491f717

Branch: refs/heads/HDFS-7240
Commit: f491f717e9ee6b75ad5cfca48da9c6297e94a8f7
Parents: b061215
Author: Yongjun Zhang 
Authored: Wed Feb 7 12:58:09 2018 -0800
Committer: Yongjun Zhang 
Committed: Wed Feb 7 12:58:09 2018 -0800

--
 .../hadoop/hdfs/server/namenode/LeaseManager.java  | 13 +++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f491f717/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 1e7a174..31fb2bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -144,7 +144,15 @@ public class LeaseManager {
   + "acquired before counting under construction blocks";
 long numUCBlocks = 0;
 for (Long id : getINodeIdWithLeases()) {
-  final INodeFile cons = 
fsnamesystem.getFSDirectory().getInode(id).asFile();
+  INode inode = fsnamesystem.getFSDirectory().getInode(id);
+  if (inode == null) {
+// The inode could have been deleted after getINodeIdWithLeases() is
+// called, check here, and ignore it if so
+LOG.warn("Failed to find inode {} in getNumUnderConstructionBlocks().",
+id);
+continue;
+  }
+  final INodeFile cons = inode.asFile();
   if (!cons.isUnderConstruction()) {
 LOG.warn("The file {} is not under construction but has lease.",
 cons.getFullPathName());
@@ -155,10 +163,11 @@ public class LeaseManager {
 continue;
   }
   for(BlockInfo b : blocks) {
-if(!b.isComplete())
+if(!b.isComplete()) {
   numUCBlocks++;
 }
   }
+}
 LOG.info("Number of blocks under construction: {}", numUCBlocks);
 return numUCBlocks;
   }





[48/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-02-15 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 58076f5,000..65b8726
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@@ -1,1095 -1,0 +1,1100 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + *  with the License.  You may obtain a copy of the License at
 + *
 + *  http://www.apache.org/licenses/LICENSE-2.0
 + *
 + *  Unless required by applicable law or agreed to in writing, software
 + *  distributed under the License is distributed on an "AS IS" BASIS,
 + *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + *  See the License for the specific language governing permissions and
 + *  limitations under the License.
 + */
 +
 +package org.apache.hadoop.ozone.container.common.impl;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.base.Preconditions;
 +import org.apache.commons.codec.digest.DigestUtils;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
 +import org.apache.hadoop.hdfs.protocol.DatanodeID;
 +import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 +import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 +import org.apache.hadoop.ozone.container.common.interfaces.*;
 +import 
org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 +import 
org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
 +import org.apache.hadoop.util.ReflectionUtils;
 +import 
org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos;
 +import 
org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
 +import org.apache.hadoop.ozone.protocol.proto
 +.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 +import org.apache.hadoop.ozone.protocol.proto
 +.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 +import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 +import org.apache.hadoop.io.IOUtils;
 +import org.apache.hadoop.ozone.OzoneConsts;
 +import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 +import org.apache.hadoop.scm.ScmConfigKeys;
 +import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 +import org.apache.hadoop.utils.MetadataKeyFilters;
 +import org.apache.hadoop.utils.MetadataStore;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileOutputStream;
 +import java.io.FilenameFilter;
 +import java.io.IOException;
 +import java.nio.file.Path;
 +import java.nio.file.Paths;
 +import java.security.DigestInputStream;
 +import java.security.DigestOutputStream;
 +import java.security.MessageDigest;
 +import java.security.NoSuchAlgorithmException;
 +import java.util.LinkedList;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ConcurrentNavigableMap;
 +import java.util.concurrent.ConcurrentSkipListMap;
 +import java.util.concurrent.locks.ReentrantReadWriteLock;
 +import java.util.stream.Collectors;
 +
 +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.CONTAINER_EXISTS;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.CONTAINER_INTERNAL_ERROR;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.CONTAINER_NOT_FOUND;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.INVALID_CONFIG;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.IO_EXCEPTION;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.NO_SUCH_ALGORITHM;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +.Result.UNABLE_TO_READ_METADATA_DB;
 +import static 

[21/50] [abbrv] hadoop git commit: HDFS-10453. ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster.. Contributed by He Xi

2018-02-15 Thread aengineer
HDFS-10453. ReplicationMonitor thread could get stuck for a long time due to the race 
between replication and deletion of the same file in a large cluster. Contributed by 
He Xiaoqiao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96bb6a51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96bb6a51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96bb6a51

Branch: refs/heads/HDFS-7240
Commit: 96bb6a51ec4a470e9b287c94e377444a9f97c410
Parents: 8cf88fc
Author: Arpit Agarwal 
Authored: Mon Feb 12 07:00:50 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Feb 12 07:17:40 2018 -0800

--
 .../hdfs/server/blockmanagement/BlockReconstructionWork.java   | 6 ++
 .../hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java  | 3 +--
 .../hadoop/hdfs/server/blockmanagement/ReplicationWork.java| 6 ++
 3 files changed, 9 insertions(+), 6 deletions(-)
--
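The essence of the fix, visible in the diff below, is to capture the block size once
when the reconstruction work is created, so the later placement call no longer
re-reads a block that a concurrent delete may have modified. A stripped-down sketch
of that capture-at-construction pattern (simplified, hypothetical types, not the
actual Hadoop classes):

    // Snapshot a mutable attribute at scheduling time so the placement decision
    // sees a stable value even if the source block changes concurrently.
    abstract class Work {
      private final BlockLike block;
      private final long blockSize;            // new: cached when the work is created

      Work(BlockLike block) {
        this.block = block;
        this.blockSize = block.getNumBytes();  // read once, while the block is still valid
      }

      long getBlockSize() { return blockSize; }

      void chooseTargets(PlacementPolicy policy) {
        // Previously the policy was handed getBlock().getNumBytes(), re-read here;
        // now it receives the snapshot taken in the constructor.
        policy.chooseTarget(getBlockSize());
      }
    }
    interface BlockLike { long getNumBytes(); }
    interface PlacementPolicy { void chooseTarget(long blockSize); }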


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96bb6a51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
index 3f591e8..d383191 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -33,6 +33,7 @@ abstract class BlockReconstructionWork {
   private final BlockInfo block;
 
   private final String srcPath;
+  private final long blockSize;
   private final byte storagePolicyID;
 
   /**
@@ -59,6 +60,7 @@ abstract class BlockReconstructionWork {
   int priority) {
 this.block = block;
 this.srcPath = bc.getName();
+this.blockSize = block.getNumBytes();
 this.storagePolicyID = bc.getStoragePolicyID();
 this.srcNodes = srcNodes;
 this.containingNodes = containingNodes;
@@ -100,6 +102,10 @@ abstract class BlockReconstructionWork {
 return srcPath;
   }
 
+  public long getBlockSize() {
+return blockSize;
+  }
+
   public byte getStoragePolicyID() {
 return storagePolicyID;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96bb6a51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index a23b1d5..147f8cf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
@@ -59,8 +59,7 @@ class ErasureCodingWork extends BlockReconstructionWork {
 // TODO: new placement policy for EC considering multiple writers
 DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
 getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-getLiveReplicaStorages(), false, excludedNodes,
-getBlock().getNumBytes(),
+getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
 storagePolicySuite.getPolicy(getStoragePolicyID()), null);
 setTargets(chosenTargets);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96bb6a51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 26c38cb..f250bcb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -45,10 +45,8 @@ class ReplicationWork extends BlockReconstructionWork {
 try {
   DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
   getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-  

[26/50] [abbrv] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

2018-02-15 Thread aengineer
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh 
may not be accessible. Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b88cb33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b88cb33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b88cb33

Branch: refs/heads/HDFS-7240
Commit: 5b88cb339898f82519223bcd07e1caedff02d051
Parents: 5a1db60
Author: Chris Douglas 
Authored: Mon Feb 12 21:00:47 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:00:47 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 +++
 start-build-env.sh  |  32 +-
 2 files changed, 131 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b88cb33/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 000..0c32bcf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+shift
+echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+shift
+until [ $# -eq 0 ]; do
+  if [ "$1" = -v ]; then
+shift
+echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+  printf "Mounted %s with %s option.\n", $1, $3
+  else if (NF == 2)
+  printf "Mounted %s without %s option.\n", $1, "z"}'
+  fi
+  shift
+done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+if [ -z "$USER" ]; then
+  USER=${HOME##*/}
+fi
+export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+printf 'mock_u:mock_r:mock_t:s0'
+  else
+command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
+  [[ ${lines[1]} =~ \
+ "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+ "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b88cb33/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index 5a18151..60efea5 

[29/50] [abbrv] hadoop git commit: YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for each queue

2018-02-15 Thread aengineer
YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for 
each queue


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5e6e3de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5e6e3de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5e6e3de

Branch: refs/heads/HDFS-7240
Commit: c5e6e3de1c31eda052f89eddd7bba288625936b9
Parents: 0c5d7d7
Author: Eric Payne 
Authored: Tue Feb 13 10:11:02 2018 -0600
Committer: Eric Payne 
Committed: Tue Feb 13 10:11:02 2018 -0600

--
 .../hadoop/yarn/api/records/QueueInfo.java  | 35 +++
 .../src/main/proto/yarn_protos.proto|  1 +
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  6 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 96 ++--
 .../api/records/impl/pb/QueueInfoPBImpl.java| 13 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  2 +-
 .../capacity/IntraQueueCandidatesSelector.java  |  4 +-
 .../scheduler/capacity/AbstractCSQueue.java | 72 +--
 .../scheduler/capacity/CSQueue.java | 16 +++-
 .../CapacitySchedulerConfiguration.java | 15 +++
 .../webapp/CapacitySchedulerPage.java   |  5 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java |  6 ++
 .../TestConfigurationMutationACLPolicies.java   |  2 +-
 .../TestSchedulerApplicationAttempt.java|  2 +-
 .../scheduler/capacity/TestLeafQueue.java   |  2 +-
 .../src/site/markdown/CapacityScheduler.md  |  3 +-
 17 files changed, 257 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
index 897b442..57ea9bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -94,6 +94,26 @@ public abstract class QueueInfo {
 return queueInfo;
   }
 
+  @Private
+  @Unstable
+  public static QueueInfo newInstance(String queueName, float capacity,
+  float maximumCapacity, float currentCapacity,
+  List childQueues, List applications,
+  QueueState queueState, Set accessibleNodeLabels,
+  String defaultNodeLabelExpression, QueueStatistics queueStatistics,
+  boolean preemptionDisabled,
+  Map queueConfigurations,
+  boolean intraQueuePreemptionDisabled) {
+QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity,
+maximumCapacity, currentCapacity,
+childQueues, applications,
+queueState, accessibleNodeLabels,
+defaultNodeLabelExpression, queueStatistics,
+preemptionDisabled, queueConfigurations);
+queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
+return queueInfo;
+  }
+
   /**
* Get the name of the queue.
* @return name of the queue
@@ -261,4 +281,19 @@ public abstract class QueueInfo {
   @Unstable
   public abstract void setQueueConfigurations(
   Map queueConfigurations);
+
+
+  /**
+   * Get the intra-queue preemption status of the queue.
+   * @return if property is not in proto, return null;
+   *otherwise, return intra-queue preemption status of the queue
+   */
+  @Public
+  @Stable
+  public abstract Boolean getIntraQueuePreemptionDisabled();
+
+  @Private
+  @Unstable
+  public abstract void setIntraQueuePreemptionDisabled(
+  boolean intraQueuePreemptionDisabled);
 }
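A hedged example of how a client might consume the new getter added above. The
YarnClient instance named yarnClient and the queue name "default" are assumptions
for illustration; per the Javadoc above, the getter can return null when the
property is absent from the proto:

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.records.QueueInfo;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    public class QueuePreemptionProbe {
      public static void print(YarnClient yarnClient)
          throws IOException, YarnException {
        QueueInfo info = yarnClient.getQueueInfo("default");
        Boolean disabled = info.getIntraQueuePreemptionDisabled();
        if (disabled == null) {
          System.out.println("RM did not report intra-queue preemption for 'default'.");
        } else {
          System.out.println("Intra-queue preemption disabled for 'default': " + disabled);
        }
      }
    }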

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 25c8569..b978761 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -569,6 +569,7 @@ message QueueInfoProto {
   optional QueueStatisticsProto queueStatistics = 10;
   optional bool preemptionDisabled = 11;
   repeated QueueConfigurationsMapProto 

[47/50] [abbrv] hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread aengineer
HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47473952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47473952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47473952

Branch: refs/heads/HDFS-7240
Commit: 47473952e56b0380147d42f4110ad03c2276c961
Parents: a53d62a
Author: Kihwal Lee 
Authored: Thu Feb 15 15:32:42 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:32:42 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--
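The key to the fix, shown in the DelegationTokenSecretManager diff below, is lock
ordering: the namesystem read lock is now taken interruptibly before entering the
no-interrupts monitor, so stopping the secret manager can no longer interrupt a
thread in the middle of an edit-log sync. The companion readLockInterruptibly()
added to RwLock/FSNamesystemLock appears only in the diffstat; a minimal sketch of
what such an entry point does, mirroring the ContainerManagerImpl version quoted
later in this digest (hypothetical holder class):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class InterruptibleReadLockHolder {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      /** Acquire the read lock, but give up immediately if the thread is interrupted. */
      public void readLockInterruptibly() throws InterruptedException {
        lock.readLock().lockInterruptibly();
      }

      public void readUnlock() {
        lock.readLock().unlock();
      }
    }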


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logExpireDelegationToken(dtId);
+}
+  } finally {
+

[07/50] [abbrv] hadoop git commit: YARN-5428. Allow for specifying the docker client configuration directory. Contributed by Shane Kumpf

2018-02-15 Thread aengineer
YARN-5428. Allow for specifying the docker client configuration directory. 
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb2449d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb2449d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb2449d5

Branch: refs/heads/HDFS-7240
Commit: eb2449d5398e9ac869bc088e10d838a7f13deac0
Parents: 996796f
Author: Jian He 
Authored: Wed Feb 7 10:59:38 2018 -0800
Committer: Jian He 
Committed: Thu Feb 8 11:35:30 2018 -0800

--
 .../applications/distributedshell/Client.java   |  38 +++-
 .../DockerCredentialTokenIdentifier.java| 159 
 .../yarn/util/DockerClientConfigHandler.java| 183 +++
 .../src/main/proto/yarn_security_token.proto|   5 +
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 .../security/TestDockerClientConfigHandler.java | 129 +
 .../runtime/DockerLinuxContainerRuntime.java|  39 
 .../linux/runtime/docker/DockerCommand.java |  16 ++
 .../runtime/TestDockerContainerRuntime.java | 109 +++
 .../runtime/docker/TestDockerRunCommand.java|   8 +
 .../src/site/markdown/DockerContainers.md   |  13 +-
 11 files changed, 690 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 2aafa94..0aef83f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -225,6 +226,9 @@ public class Client {
   private String flowVersion = null;
   private long flowRunId = 0L;
 
+  // Docker client configuration
+  private String dockerClientConfig = null;
+
   // Command line options
   private Options opts;
 
@@ -368,6 +372,10 @@ public class Client {
 "If container could retry, it specifies max retires");
 opts.addOption("container_retry_interval", true,
 "Interval between each retry, unit is milliseconds");
+opts.addOption("docker_client_config", true,
+"The docker client configuration path. The scheme should be supplied"
++ " (i.e. file:// or hdfs://)."
++ " Only used when the Docker runtime is enabled and requested.");
 opts.addOption("placement_spec", true,
 "Placement specification. Please note, if this option is specified,"
 + " The \"num_containers\" option will be ignored. All requested"
@@ -585,6 +593,9 @@ public class Client {
 "Flow run is not a valid long value", e);
   }
 }
+if (cliParser.hasOption("docker_client_config")) {
+  dockerClientConfig = cliParser.getOptionValue("docker_client_config");
+}
 return true;
   }
 
@@ -884,9 +895,10 @@ public class Client {
 // amContainer.setServiceData(serviceData);
 
 // Setup security tokens
+Credentials rmCredentials = null;
 if (UserGroupInformation.isSecurityEnabled()) {
   // Note: Credentials class is marked as LimitedPrivate for HDFS and 
MapReduce
-  Credentials credentials = new Credentials();
+  rmCredentials = new Credentials();
   String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
   if (tokenRenewer == null || tokenRenewer.length() == 0) {
 throw new IOException(
@@ -895,16 +907,32 @@ public class Client {
 
   // For now, only getting tokens for the default file-system.
   final Token 

[10/50] [abbrv] hadoop git commit: HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.

2018-02-15 Thread aengineer
HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/543f3abb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/543f3abb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/543f3abb

Branch: refs/heads/HDFS-7240
Commit: 543f3abbee79d7ec70353f0cdda6397ee001324e
Parents: ddec08d
Author: Yiqun Lin 
Authored: Fri Feb 9 13:57:42 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Feb 9 13:57:42 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++--
 .../src/main/resources/hdfs-default.xml| 10 --
 .../src/site/markdown/HDFSRouterFederation.md  |  2 +-
 .../server/federation/RouterConfigBuilder.java |  6 ++
 .../store/FederationStateStoreTestUtils.java   | 17 +++--
 5 files changed, 28 insertions(+), 11 deletions(-)
--
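Deployments that relied on the previous default can keep the local-file driver by
setting the key explicitly. A minimal programmatic sketch is below (test-style
snippet with a hypothetical class name; a real cluster would put the same key in
hdfs-site.xml, as described in the hdfs-default.xml hunk that follows):

    import org.apache.hadoop.conf.Configuration;

    public class FileStateStoreConf {
      public static Configuration routerConf() {
        Configuration conf = new Configuration();
        // Pin the Router State Store to the pre-change local-file driver
        // instead of the new ZooKeeper default.
        conf.set("dfs.federation.router.store.driver.class",
            "org.apache.hadoop.hdfs.server.federation.store.driver.impl."
                + "StateStoreFileImpl");
        return conf;
      }
    }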


http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e0b5b85..c0ad4ec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -34,8 +34,8 @@ import 
org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1275,7 +1275,7 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
   FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class
-  FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+  FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
   FEDERATION_STORE_PREFIX + "connection.test";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 59df122..f6d232e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5085,9 +5085,15 @@
 
   
 dfs.federation.router.store.driver.class
-
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl
+
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl
 
-  Class to implement the State Store. By default it uses the local disk.
+  Class to implement the State Store. There are three implementation 
classes currently
+  being supported:
+  
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+  
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl
 and
+  
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+  These implementation classes use the local file, filesystem and 
ZooKeeper as a backend respectively.
+  By default it uses the ZooKeeper as the default State Store.
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5649755..ebe94a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -325,7 +325,7 @@ The connection to the State 

[46/50] [abbrv] hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread aengineer
MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a53d62ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a53d62ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a53d62ab

Branch: refs/heads/HDFS-7240
Commit: a53d62ab26e170a0338f93e228718da52e9196e4
Parents: da59acd
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:12:57 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--
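Why the test was flaky (a hypothetical walk-through of the arithmetic, not part of
the patch): the test picks recordLength at random, and when it lands on 1, integer
division makes recordLength/2 equal to 0, so the old expression divided the file
size by Math.floor(0) and produced a meaningless split count. Pinning the divisor
at 1 with Math.max removes that corner case:

    public class SplitDivisorDemo {
      public static void main(String[] args) {
        long fileSize = 1024;                  // illustrative size
        int recordLength = 1;                  // the unlucky random value
        double oldDivisor = Math.floor(recordLength / 2);              // 1/2 == 0 -> 0.0
        double newDivisor = Math.max(1, Math.floor(recordLength / 2)); // pinned to 1.0
        System.out.println((int) (fileSize / oldDivisor));  // Integer.MAX_VALUE (1024/0.0 is Infinity)
        System.out.println((int) (fileSize / newDivisor));  // 1024
      }
    }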


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a53d62ab/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 4864dd0..5134729 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -301,7 +301,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 





[16/50] [abbrv] hadoop git commit: YARN-7906. Fix mvn site fails with error: Multiple sources of package comments found for package o.a.h.y.client.api.impl. (Akira Ajisaka via wangda)

2018-02-15 Thread aengineer
YARN-7906. Fix mvn site fails with error: Multiple sources of package comments 
found for package o.a.h.y.client.api.impl. (Akira Ajisaka via wangda)

Change-Id: I20221d97446e97f208d587eacbc60448c11ffd48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e795833d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e795833d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e795833d

Branch: refs/heads/HDFS-7240
Commit: e795833d8c1981cab85a10b4e516cd0c5423c792
Parents: a08c048
Author: Wangda Tan 
Authored: Mon Feb 12 10:25:22 2018 +0800
Committer: Wangda Tan 
Committed: Mon Feb 12 10:25:22 2018 +0800

--
 .../java/org/apache/hadoop/yarn/api/resource/package-info.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e795833d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
index 660dc02..a9388b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -18,6 +18,4 @@
 /**
  * API related to resources.
  */
-@InterfaceAudience.Private
 package org.apache.hadoop.yarn.api.resource;
-import org.apache.hadoop.classification.InterfaceAudience;





[34/50] [abbrv] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

2018-02-15 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 0db633f..6e63543 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -28,8 +28,6 @@ import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
@@ -137,6 +135,8 @@ import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * RPC program corresponding to nfs daemon. See {@link Nfs3}.
@@ -146,7 +146,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
   public static final FsPermission umask = new FsPermission(
   (short) DEFAULT_UMASK);
 
-  static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
+  static final Logger LOG = LoggerFactory.getLogger(RpcProgramNfs3.class);
 
   private final NfsConfiguration config;
   private final WriteManager writeManager;
@@ -204,7 +204,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
 superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
 NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
-LOG.info("Configured HDFS superuser is " + superuser);
+LOG.info("Configured HDFS superuser is {}", superuser);
 
 if (!enableDump) {
   writeDumpDir = null;
@@ -230,13 +230,13 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
   private void clearDirectory(String writeDumpDir) throws IOException {
 File dumpDir = new File(writeDumpDir);
 if (dumpDir.exists()) {
-  LOG.info("Delete current dump directory " + writeDumpDir);
+  LOG.info("Delete current dump directory {}", writeDumpDir);
   if (!(FileUtil.fullyDelete(dumpDir))) {
 throw new IOException("Cannot remove current dump directory: "
 + dumpDir);
   }
 }
-LOG.info("Create new dump directory " + writeDumpDir);
+LOG.info("Create new dump directory {}", writeDumpDir);
 if (!dumpDir.mkdirs()) {
   throw new IOException("Cannot create dump directory " + dumpDir);
 }
@@ -298,9 +298,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 
   @Override
   public NFS3Response nullProcedure() {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("NFS NULL");
-}
+LOG.debug("NFS NULL");
 return new NFS3Response(Nfs3Status.NFS3_OK);
   }
 
@@ -331,10 +329,9 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 FileHandle handle = request.getHandle();
 int namenodeId = handle.getNamenodeId();
 if (LOG.isDebugEnabled()) {
-  LOG.debug("GETATTR for fileHandle: " + handle.dumpFileHandle()
-  + " client: " + remoteAddress);
+  LOG.debug("GETATTR for fileHandle: {} client: {}",
+  handle.dumpFileHandle(), remoteAddress);
 }
-
 DFSClient dfsClient =
 clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
 if (dfsClient == null) {
@@ -346,7 +343,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 try {
   attrs = writeManager.getFileAttr(dfsClient, handle, iug);
 } catch (RemoteException r) {
-  LOG.warn("Exception ", r);
+  LOG.warn("Exception", r);
   IOException io = r.unwrapRemoteException();
   /**
* AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -357,13 +354,13 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 return new GETATTR3Response(Nfs3Status.NFS3ERR_IO);
   }
 } catch (IOException e) {
-  LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
+  LOG.info("Can't get file attribute, fileId={}", handle.getFileId(), e);
   int status = mapErrorStatus(e);
   response.setStatus(status);
   return response;
 }
 if (attrs == null) {
-  LOG.error("Can't get path for fileId: " + handle.getFileId());
+  LOG.error("Can't get path for fileId: {}", handle.getFileId());
   response.setStatus(Nfs3Status.NFS3ERR_STALE);
   return 
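
For reference, the pattern applied throughout this change replaces commons-logging calls and most explicit isDebugEnabled() guards with SLF4J parameterized logging. A minimal sketch of the before/after, using a hypothetical class and message rather than code from the patch:

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class LoggingSketch {
    static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

    void report(String superuser, Exception e) {
      // Old style: eager string concatenation, usually wrapped in a guard.
      // if (LOG.isDebugEnabled()) {
      //   LOG.debug("Configured HDFS superuser is " + superuser);
      // }

      // SLF4J style: the {} placeholder defers formatting until the level is
      // enabled, so simple debug calls no longer need an explicit guard.
      LOG.debug("Configured HDFS superuser is {}", superuser);

      // A trailing Throwable is still logged with its stack trace.
      LOG.warn("Exception while handling request for {}", superuser, e);
    }
  }

Note that the patch keeps isDebugEnabled() guards where building the arguments is itself costly, for example around handle.dumpFileHandle().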

[50/50] [abbrv] hadoop git commit: adding missing file

2018-02-15 Thread aengineer
adding missing file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2ffd9ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2ffd9ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2ffd9ce

Branch: refs/heads/HDFS-7240
Commit: a2ffd9ceaf0240dfd811a10b987f259c7ea1d93c
Parents: 4791978
Author: Anu Engineer 
Authored: Thu Feb 15 15:37:57 2018 -0800
Committer: Anu Engineer 
Committed: Thu Feb 15 15:37:57 2018 -0800

--
 .../hadoop/ozone/container/common/impl/ContainerManagerImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2ffd9ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 65b8726..f701900 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -747,7 +747,7 @@ public class ContainerManagerImpl implements 
ContainerManager {
 
   }
 
-  @Override
+@Override
   public void readLockInterruptibly() throws InterruptedException {
 this.lock.readLock().lockInterruptibly();
   }





[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-02-15 Thread aengineer
Merge branch 'trunk' into HDFS-7240

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

Added the following code in:
hadoop/ozone/container/common/impl/ContainerManagerImpl.java
  @Override
  public void readLockInterruptibly() throws InterruptedException {
this.lock.readLock().lockInterruptibly();
  }

and manually updated the value of version in
modified: hadoop-tools/hadoop-ozone/pom.xml
to
3.2.0-SNAPSHOT
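
The readLockInterruptibly() method added above simply delegates to the JDK read/write lock, which is why it is declared to throw InterruptedException. A minimal, self-contained sketch of the idea; the class and caller names are hypothetical and not part of the patch:

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class ReadLockSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    public void readLockInterruptibly() throws InterruptedException {
      // Blocks until the read lock is acquired, but can be interrupted while waiting.
      this.lock.readLock().lockInterruptibly();
    }

    public void readUnlock() {
      this.lock.readLock().unlock();
    }

    // Hypothetical caller: release the lock only if it was actually acquired.
    public void readSafely(Runnable action) throws InterruptedException {
      readLockInterruptibly();
      try {
        action.run();
      } finally {
        readUnlock();
      }
    }
  }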


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47919787
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47919787
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47919787

Branch: refs/heads/HDFS-7240
Commit: 479197872ba89159ec2160fbdda92a1665362b5d
Parents: fc84744 4747395
Author: Anu Engineer 
Authored: Thu Feb 15 15:28:08 2018 -0800
Committer: Anu Engineer 
Committed: Thu Feb 15 15:28:08 2018 -0800

--
 hadoop-assemblies/pom.xml   |   4 +-
 hadoop-build-tools/pom.xml  |   2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml |   4 +-
 .../hadoop-client-check-invariants/pom.xml  |   4 +-
 .../hadoop-client-check-test-invariants/pom.xml |   4 +-
 .../hadoop-client-integration-tests/pom.xml |   4 +-
 .../hadoop-client-minicluster/pom.xml   |   4 +-
 .../hadoop-client-runtime/pom.xml   |   4 +-
 hadoop-client-modules/hadoop-client/pom.xml |   4 +-
 hadoop-client-modules/pom.xml   |   2 +-
 .../hadoop-cloud-storage/pom.xml|   4 +-
 hadoop-cloud-storage-project/pom.xml|   4 +-
 .../hadoop-annotations/pom.xml  |   4 +-
 .../hadoop-auth-examples/pom.xml|   4 +-
 hadoop-common-project/hadoop-auth/pom.xml   |  14 +-
 .../client/AuthenticatorTestCase.java   |  51 +-
 .../client/TestKerberosAuthenticator.java   |  41 +-
 hadoop-common-project/hadoop-common/pom.xml |   4 +-
 .../hadoop-common/src/main/bin/hadoop   |   4 +
 .../hadoop-common/src/main/bin/hadoop.cmd   |   7 +-
 .../org/apache/hadoop/conf/Configuration.java   |  80 ++
 .../org/apache/hadoop/conf/StorageSize.java | 106 +++
 .../org/apache/hadoop/conf/StorageUnit.java | 530 +++
 .../hadoop/fs/CommonConfigurationKeys.java  |   2 +-
 .../java/org/apache/hadoop/fs/FileUtil.java | 257 +-
 .../org/apache/hadoop/fs/LocalFileSystem.java   |   2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  30 +-
 .../apache/hadoop/ha/FailoverController.java|  20 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |   9 +-
 .../org/apache/hadoop/http/HttpServer2.java |   2 +-
 .../org/apache/hadoop/io/retry/RetryUtils.java  |  11 +-
 .../main/java/org/apache/hadoop/net/DNS.java|  39 +-
 .../AbstractDelegationTokenSecretManager.java   |   6 +
 .../apache/hadoop/service/AbstractService.java  |  27 +-
 .../hadoop/service/ServiceOperations.java   |   6 +-
 .../org/apache/hadoop/util/CombinedIPList.java  |  59 ++
 .../hadoop/util/GenericOptionsParser.java   |   8 +-
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../java/org/apache/hadoop/util/RunJar.java |  69 ++
 .../src/main/resources/core-default.xml |  13 +-
 .../src/site/markdown/CommandsManual.md |   6 +
 .../src/site/markdown/SecureMode.md |  32 +-
 .../apache/hadoop/conf/TestConfiguration.java   |  76 ++
 .../org/apache/hadoop/conf/TestStorageUnit.java | 277 ++
 .../hadoop/fs/TestDelegateToFileSystem.java |   2 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |  86 ++
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |   2 +-
 .../hadoop/service/TestServiceOperations.java   |   3 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../java/org/apache/hadoop/util/TestRunJar.java |  57 ++
 .../src/test/scripts/start-build-env.bats   | 102 +++
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../hadoop/crypto/key/kms/server/KMS.java   |   4 +-
 hadoop-common-project/hadoop-minikdc/pom.xml|   4 +-
 hadoop-common-project/hadoop-nfs/pom.xml|   4 +-
 hadoop-common-project/pom.xml   |   4 +-
 hadoop-dist/pom.xml |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   4 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |   3 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java |   4 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   6 +-
 .../hdfs/client/HdfsClientConfigKeys.java   |   5 +-
 .../hdfs/client/impl/BlockReaderFactory.java  

[44/50] [abbrv] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-15 Thread aengineer
HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481d79fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481d79fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481d79fe

Branch: refs/heads/HDFS-7240
Commit: 481d79fedc48942654dab08e23e71e80c8eb2aca
Parents: 9a013b2
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 16:27:31 2018 +

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index a496595..9f54a36 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -37,6 +39,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -74,6 +78,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -115,12 +121,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  specific options are patched over the base ones
+   *  before any use is made of the config.
*/
   @Override
-  public void initialize(URI storeUri, Configuration 
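
A short illustration of the per-store mechanism added here: options under the new fs.adl.account. prefix are patched over the matching base fs.adl. options before the store client is created. The account name and credential values below are hypothetical, and the exact override semantics are assumed from the javadoc above rather than shown in this excerpt:

  import org.apache.hadoop.conf.Configuration;

  public class AdlPerStoreConfigSketch {
    public static Configuration example() {
      Configuration conf = new Configuration();
      // Base client id used for any ADLS store.
      conf.set("fs.adl.oauth2.client.id", "default-client-id");
      // Account-specific override for the hypothetical store "contosostore";
      // keys under fs.adl.account.<store>. take precedence for that store.
      conf.set("fs.adl.account.contosostore.oauth2.client.id", "contoso-client-id");
      return conf;
    }
  }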

[02/50] [abbrv] hadoop git commit: YARN-7815. Make the YARN mounts added to Docker containers more restrictive. Contributed by Shane Kumpf

2018-02-15 Thread aengineer
YARN-7815. Make the YARN mounts added to Docker containers more restrictive. 
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/456705a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/456705a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/456705a0

Branch: refs/heads/HDFS-7240
Commit: 456705a07c8b80658950acc99f23086244c6b20f
Parents: 01bd6ab
Author: Jason Lowe 
Authored: Wed Feb 7 13:09:08 2018 -0600
Committer: Jason Lowe 
Committed: Wed Feb 7 13:09:08 2018 -0600

--
 .../nodemanager/LinuxContainerExecutor.java |   3 +
 .../launcher/ContainerLaunch.java   |  52 +++-
 .../launcher/ContainerRelaunch.java |   5 +
 .../runtime/DockerLinuxContainerRuntime.java|  23 +-
 .../runtime/LinuxContainerRuntimeConstants.java |   4 +
 .../linux/runtime/docker/DockerRunCommand.java  |  14 +
 .../executor/ContainerStartContext.java |  26 ++
 .../TestLinuxContainerExecutorWithMocks.java|   8 +
 .../launcher/TestContainerRelaunch.java |   2 +
 .../runtime/TestDockerContainerRuntime.java | 271 ---
 10 files changed, 232 insertions(+), 176 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index d359f31..fe54e2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -651,6 +651,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   .setExecutionAttribute(FILECACHE_DIRS, ctx.getFilecacheDirs())
   .setExecutionAttribute(USER_LOCAL_DIRS, ctx.getUserLocalDirs())
   .setExecutionAttribute(CONTAINER_LOCAL_DIRS, ctx.getContainerLocalDirs())
+  .setExecutionAttribute(USER_FILECACHE_DIRS, ctx.getUserFilecacheDirs())
+  .setExecutionAttribute(APPLICATION_LOCAL_DIRS,
+  ctx.getApplicationLocalDirs())
   .setExecutionAttribute(CONTAINER_LOG_DIRS, ctx.getContainerLogDirs())
   .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 7f43458..112f54a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -169,6 +169,17 @@ public class ContainerLaunch implements Callable {
 return var;
   }
 
+  private Map expandAllEnvironmentVars(
+  ContainerLaunchContext launchContext, Path containerLogDir) {
+Map environment = launchContext.getEnvironment();
+for (Entry entry : environment.entrySet()) {
+  String value = entry.getValue();
+  value = expandEnvironment(value, containerLogDir);
+  entry.setValue(value);
+}
+return environment;
+  }
+
   @Override
   @SuppressWarnings("unchecked") // dispatcher not typed
   public Integer call() {
@@ -202,13 +213,8 @@ public class ContainerLaunch implements Callable {
   }
   launchContext.setCommands(newCmds);
 
-  Map environment = 

[41/50] [abbrv] hadoop git commit: HADOOP-15076. Enhance S3A troubleshooting documents and add a performance document. Contributed by Steve Loughran.

2018-02-15 Thread aengineer
HADOOP-15076. Enhance S3A troubleshooting documents and add a performance document.
Contributed by Steve Loughran.

(cherry picked from commit c761e658f6594c4e519ed39ef36669de2c5cee15)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b27ab7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b27ab7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b27ab7dd

Branch: refs/heads/HDFS-7240
Commit: b27ab7dd81359df0a7594ebb98e656a41cd19250
Parents: c9a373f
Author: Steve Loughran 
Authored: Thu Feb 15 14:57:56 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:57:56 2018 +

--
 .../markdown/tools/hadoop-aws/encryption.md |  21 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  77 +-
 .../markdown/tools/hadoop-aws/performance.md| 518 +
 .../tools/hadoop-aws/troubleshooting_s3a.md | 753 ---
 4 files changed, 1029 insertions(+), 340 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
index 719c5e5..54398d7 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
@@ -37,6 +37,8 @@ and keys with which the file was encrypted.
 * You can use AWS bucket policies to mandate encryption rules for a bucket.
 * You can use S3A per-bucket configuration to ensure that S3A clients use 
encryption
 policies consistent with the mandated rules.
+* You can use S3 Default Encryption to encrypt data without needing to
+set anything in the client.
 * Changing the encryption options on the client does not change how existing
 files were encrypted, except when the files are renamed.
 * For all mechanisms other than SSE-C, clients do not need any configuration
@@ -58,9 +60,10 @@ The server-side "SSE" encryption is performed with symmetric AES256 encryption;
 S3 offers different mechanisms for actually defining the key to use.
 
 
-There are thrre key management mechanisms, which in order of simplicity of use,
+There are four key management mechanisms, which in order of simplicity of use,
 are:
 
+* S3 Default Encryption
 * SSE-S3: an AES256 key is generated in S3, and saved alongside the data.
 * SSE-KMS: an AES256 key is generated in S3, and encrypted with a secret key 
provided
 by Amazon's Key Management Service, a key referenced by name in the uploading 
client.
@@ -68,6 +71,19 @@ by Amazon's Key Management Service, a key referenced by name in the uploading client.
 to encrypt and decrypt the data.
 
 
+##  S3 Default Encryption
+
+This feature allows the administrators of the AWS account to set the "default"
+encryption policy on a bucket -the encryption to use if the client does
+not explicitly declare an encryption algorithm.
+
+[S3 Default Encryption for S3 
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+
+This supports SSE-S3 and SSE-KMS.
+
+There is no need to set anything up in the client: do it in the AWS console.
+
+
 ##  SSE-S3 Amazon S3-Managed Encryption Keys
 
 In SSE-S3, all keys and secrets are managed inside S3. This is the simplest 
encryption mechanism.
@@ -413,7 +429,6 @@ How can you do that from Hadoop? With `rename()`.
 
 The S3A client mimics a real filesystem's' rename operation by copying all the
 source files to the destination paths, then deleting the old ones.
-If you do a rename()
 
 Note: this does not work for SSE-C, because you cannot set a different key
 for reading as for writing, and you must supply that key for reading. There
@@ -421,7 +436,7 @@ you need to copy one bucket to a different bucket, one with a different key.
 Use `distCp`for this, with per-bucket encryption policies.
 
 
-##  Troubleshooting Encryption
+##  Troubleshooting Encryption
 
 The [troubleshooting](./troubleshooting_s3a.html) document covers
 stack traces which may surface when working with encrypted data.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 0e03100..edf392d 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -25,6 +25,7 
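
Alongside the documentation changes above, a minimal sketch of the per-bucket encryption configuration the text refers to. The bucket name and KMS key ARN are hypothetical, and the property names are assumed from the existing S3A per-bucket configuration mechanism rather than taken from this excerpt:

  import org.apache.hadoop.conf.Configuration;

  public class S3AEncryptionConfigSketch {
    public static Configuration example() {
      Configuration conf = new Configuration();
      // Per-bucket override: keys under fs.s3a.bucket.<name>. apply only to that bucket.
      conf.set("fs.s3a.bucket.example-bucket.server-side-encryption-algorithm", "SSE-KMS");
      conf.set("fs.s3a.bucket.example-bucket.server-side-encryption.key",
          "arn:aws:kms:us-west-2:123456789012:key/example-key-id");
      // Buckets without an override fall back to whatever S3 Default Encryption
      // policy the bucket administrator has configured server-side.
      return conf;
    }
  }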

[38/50] [abbrv] hadoop git commit: HADOOP-15204. Add Configuration API for parsing storage sizes. Contributed by Anu Engineer.

2018-02-15 Thread aengineer
HADOOP-15204. Add Configuration API for parsing storage sizes. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f66affd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f66affd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f66affd

Branch: refs/heads/HDFS-7240
Commit: 8f66affd6265c9e4231e18d7ca352fb3035dae9a
Parents: bddfe42
Author: Anu Engineer 
Authored: Wed Feb 14 13:11:37 2018 -0800
Committer: Anu Engineer 
Committed: Wed Feb 14 13:11:37 2018 -0800

--
 .../org/apache/hadoop/conf/Configuration.java   |  80 +++
 .../org/apache/hadoop/conf/StorageSize.java | 106 
 .../org/apache/hadoop/conf/StorageUnit.java | 530 +++
 .../apache/hadoop/conf/TestConfiguration.java   |  76 +++
 .../org/apache/hadoop/conf/TestStorageUnit.java | 277 ++
 5 files changed, 1069 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f66affd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index fce2194..f8e4638 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -109,6 +109,9 @@ import org.w3c.dom.Element;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
 /**
  * Provides access to configuration parameters.
  *
@@ -1818,6 +1821,83 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 }
 return durations;
   }
+  /**
+   * Gets the Storage Size from the config, or returns the defaultValue. The
+   * unit of return value is specified in target unit.
+   *
+   * @param name - Key Name
+   * @param defaultValue - Default Value -- e.g. 100MB
+   * @param targetUnit - The units that we want result to be in.
+   * @return double -- formatted in target Units
+   */
+  public double getStorageSize(String name, String defaultValue,
+  StorageUnit targetUnit) {
+Preconditions.checkState(isNotBlank(name), "Key cannot be blank.");
+String vString = get(name);
+if (isBlank(vString)) {
+  vString = defaultValue;
+}
+
+// Please note: There is a bit of subtlety here. If the user specifies
+// the default unit as "1GB", but the requested unit is MB, we will return
+// the format in MB even thought the default string is specified in GB.
+
+// Converts a string like "1GB" to to unit specified in targetUnit.
+
+StorageSize measure = StorageSize.parse(vString);
+return convertStorageUnit(measure.getValue(), measure.getUnit(),
+targetUnit);
+  }
+
+  /**
+   * Gets storage size from a config file.
+   *
+   * @param name - Key to read.
+   * @param defaultValue - The default value to return in case the key is
+   * not present.
+   * @param targetUnit - The Storage unit that should be used
+   * for the return value.
+   * @return - double value in the Storage Unit specified.
+   */
+  public double getStorageSize(String name, double defaultValue,
+  StorageUnit targetUnit) {
+Preconditions.checkNotNull(targetUnit, "Conversion unit cannot be null.");
+Preconditions.checkState(isNotBlank(name), "Name cannot be blank.");
+String vString = get(name);
+if (isBlank(vString)) {
+  return targetUnit.getDefault(defaultValue);
+}
+
+StorageSize measure = StorageSize.parse(vString);
+return convertStorageUnit(measure.getValue(), measure.getUnit(),
+targetUnit);
+
+  }
+
+  /**
+   * Sets Storage Size for the specified key.
+   *
+   * @param name - Key to set.
+   * @param value - The numeric value to set.
+   * @param unit - Storage Unit to be used.
+   */
+  public void setStorageSize(String name, double value, StorageUnit unit) {
+set(name, value + unit.getShortName());
+  }
+
+  /**
+   * convert the value from one storage unit to another.
+   *
+   * @param value - value
+   * @param sourceUnit - Source unit to convert from
+   * @param targetUnit - target unit.
+   * @return double.
+   */
+  private double convertStorageUnit(double value, StorageUnit sourceUnit,
+  StorageUnit targetUnit) {
+double byteValue = sourceUnit.toBytes(value);
+return targetUnit.fromBytes(byteValue);
+  }
 
   /**
* Get the value of the name 
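
A short usage sketch for the new storage-size API shown above. The key names and values are hypothetical, and it assumes StorageUnit exposes constants such as MB and GB (consistent with the getShortName() call in setStorageSize):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.conf.StorageUnit;

  public class StorageSizeConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();

      // Stores the numeric value with the unit's short name appended, e.g. "0.5gb".
      conf.setStorageSize("example.cache.size", 0.5, StorageUnit.GB);

      // Reads the value back, converted into MB regardless of the unit it was stored in.
      double inMb = conf.getStorageSize("example.cache.size", "256MB", StorageUnit.MB);
      System.out.println("cache size in MB: " + inMb);      // expected: 512.0

      // For an absent key, the default string is parsed and converted instead.
      double fallback = conf.getStorageSize("example.missing.key", "256MB", StorageUnit.MB);
      System.out.println("fallback in MB: " + fallback);    // expected: 256.0
    }
  }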

[08/50] [abbrv] hadoop git commit: YARN-7655. Avoid AM preemption caused by RRs for specific nodes or racks. Contributed by Steven Rand.

2018-02-15 Thread aengineer
YARN-7655. Avoid AM preemption caused by RRs for specific nodes or racks. 
Contributed by Steven Rand.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc03ddf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc03ddf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc03ddf

Branch: refs/heads/HDFS-7240
Commit: 1bc03ddf97f3f0e0ecc1b00217438d3c91d29be5
Parents: eb2449d
Author: Yufei Gu 
Authored: Thu Feb 8 12:32:43 2018 -0800
Committer: Yufei Gu 
Committed: Thu Feb 8 12:32:43 2018 -0800

--
 .../scheduler/fair/FSPreemptionThread.java  | 62 +---
 .../fair/TestFairSchedulerPreemption.java   | 55 +
 2 files changed, 96 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc03ddf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index c05bff9..c32565f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -99,7 +99,10 @@ class FSPreemptionThread extends Thread {
* starvation.
* 2. For each {@link ResourceRequest}, iterate through matching
* nodes and identify containers to preempt all on one node, also
-   * optimizing for least number of AM container preemptions.
+   * optimizing for least number of AM container preemptions. Only nodes
+   * that match the locality level specified in the {@link ResourceRequest}
+   * are considered. However, if this would lead to AM preemption, and locality
+   * relaxation is allowed, then the search space is expanded to all nodes.
*
* @param starvedApp starved application for which we are identifying
*   preemption targets
@@ -111,27 +114,21 @@ class FSPreemptionThread extends Thread {
 
 // Iterate through enough RRs to address app's starvation
 for (ResourceRequest rr : starvedApp.getStarvedResourceRequests()) {
+  List potentialNodes = scheduler.getNodeTracker()
+  .getNodesByResourceName(rr.getResourceName());
   for (int i = 0; i < rr.getNumContainers(); i++) {
-PreemptableContainers bestContainers = null;
-List potentialNodes = scheduler.getNodeTracker()
-.getNodesByResourceName(rr.getResourceName());
-int maxAMContainers = Integer.MAX_VALUE;
-
-for (FSSchedulerNode node : potentialNodes) {
-  PreemptableContainers preemptableContainers =
-  identifyContainersToPreemptOnNode(
-  rr.getCapability(), node, maxAMContainers);
-
-  if (preemptableContainers != null) {
-// This set is better than any previously identified set.
-bestContainers = preemptableContainers;
-maxAMContainers = bestContainers.numAMContainers;
-
-if (maxAMContainers == 0) {
-  break;
-}
-  }
-} // End of iteration through nodes for one RR
+PreemptableContainers bestContainers =
+identifyContainersToPreemptForOneContainer(potentialNodes, rr);
+
+// Don't preempt AM containers just to satisfy local requests if relax
+// locality is enabled.
+if (bestContainers != null
+&& bestContainers.numAMContainers > 0
+&& !ResourceRequest.isAnyLocation(rr.getResourceName())
+&& rr.getRelaxLocality()) {
+  bestContainers = identifyContainersToPreemptForOneContainer(
+  scheduler.getNodeTracker().getAllNodes(), rr);
+}
 
 if (bestContainers != null) {
   List containers = bestContainers.getAllContainers();
@@ -154,6 +151,29 @@ class FSPreemptionThread extends Thread {
 return containersToPreempt;
   }
 
+  private PreemptableContainers identifyContainersToPreemptForOneContainer(
+  List potentialNodes, ResourceRequest rr) {
+PreemptableContainers bestContainers = null;
+   

[43/50] [abbrv] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

2018-02-15 Thread aengineer
HADOOP-15176. Enhance IAM Assumed Role support in S3A client.
Contributed by Steve Loughran

(cherry picked from commit 96c047fbb98c2378eed9693a724d4cbbd03c00fd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a013b25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a013b25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a013b25

Branch: refs/heads/HDFS-7240
Commit: 9a013b255f301c557c3868dc1ad657202e9e7a67
Parents: b27ab7d
Author: Steve Loughran 
Authored: Thu Feb 15 15:56:10 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 15:57:10 2018 +

--
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../src/main/resources/core-default.xml |  13 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../fs/s3a/AssumedRoleCredentialProvider.java   | 197 -
 .../org/apache/hadoop/fs/s3a/Constants.java |   2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  17 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  53 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java | 205 +
 .../apache/hadoop/fs/s3a/auth/RoleModel.java| 314 
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 228 ++
 .../apache/hadoop/fs/s3a/auth/package-info.java |  27 +
 .../hadoop/fs/s3a/commit/CommitOperations.java  |   2 +-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 274 ++-
 .../site/markdown/tools/hadoop-aws/testing.md   |  15 +-
 .../s3a/ITestS3AContractDistCpAssumedRole.java  |  52 --
 .../apache/hadoop/fs/s3a/ITestAssumeRole.java   | 324 
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  46 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  40 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 789 +++
 .../auth/ITestAssumedRoleCommitOperations.java  | 130 +++
 .../hadoop/fs/s3a/auth/RoleTestUtils.java   | 171 
 .../fs/s3a/commit/AbstractCommitITest.java  |  12 +-
 .../fs/s3a/commit/ITestCommitOperations.java|   4 +-
 24 files changed, 2345 insertions(+), 654 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
index 15f4fef..86c4df6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
@@ -90,6 +90,14 @@ public class JsonSerialization {
   }
 
   /**
+   * Get the mapper of this class.
+   * @return the mapper
+   */
+  public ObjectMapper getMapper() {
+return mapper;
+  }
+
+  /**
* Convert from JSON.
*
* @param json input

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ede1f1c..ece54c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -977,20 +977,21 @@
 
 
 
-  fs.s3a.assumed.role.session.duration
-  30m
+  fs.s3a.assumed.role.policy
+  
   
-Duration of assumed roles before a refresh is attempted.
+JSON policy to apply to the role.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   
 
 
 
-  fs.s3a.assumed.role.policy
-  
+  fs.s3a.assumed.role.session.duration
+  30m
   
-JSON policy containing more restrictions to apply to the role.
+Duration of assumed roles before a refresh is attempted.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+Range: 15m to 1h
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 22208f7..cbb5288 100644
--- 
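
For context on the core-default.xml properties reordered above, a minimal client-side sketch of enabling the new assumed-role provider. The role ARN is hypothetical, and the fs.s3a.assumed.role.arn property name is an assumption, since it does not appear in this excerpt:

  import org.apache.hadoop.conf.Configuration;

  public class AssumedRoleConfigSketch {
    public static Configuration example() {
      Configuration conf = new Configuration();
      // Use the provider added under org.apache.hadoop.fs.s3a.auth in this patch.
      conf.set("fs.s3a.aws.credentials.provider",
          "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider");
      // Hypothetical role to assume.
      conf.set("fs.s3a.assumed.role.arn", "arn:aws:iam::123456789012:role/example-role");
      // Properties documented in the core-default.xml hunk above.
      conf.set("fs.s3a.assumed.role.session.duration", "30m"); // valid range: 15m to 1h
      conf.set("fs.s3a.assumed.role.policy", "");              // optional JSON policy
      return conf;
    }
  }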

[22/50] [abbrv] hadoop git commit: YARN-7917. Fix failing test TestDockerContainerRuntime#testLaunchContainerWithDockerTokens. Contributed by Shane Kumpf

2018-02-15 Thread aengineer
YARN-7917. Fix failing test TestDockerContainerRuntime#testLaunchContainerWithDockerTokens. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3414fd12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3414fd12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3414fd12

Branch: refs/heads/HDFS-7240
Commit: 3414fd1243c0b0dcbb9cea986af5f096300eba97
Parents: 96bb6a5
Author: Jason Lowe 
Authored: Mon Feb 12 09:27:43 2018 -0600
Committer: Jason Lowe 
Committed: Mon Feb 12 09:27:43 2018 -0600

--
 .../linux/runtime/TestDockerContainerRuntime.java   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3414fd12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index e9cf765..4c53eb1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1775,7 +1775,7 @@ public class TestDockerContainerRuntime {
 List dockerCommands = Files
 .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-int expected = 15;
+int expected = 16;
 int counter = 0;
 Assert.assertEquals(expected, dockerCommands.size());
 Assert.assertEquals("[docker-command-execution]",
@@ -1797,12 +1797,12 @@ public class TestDockerContainerRuntime {
 dockerCommands.get(counter++));
 Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
 Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
++ "/test_user_filecache_dir:/test_user_filecache_dir",
+dockerCommands.get(counter++));
 Assert.assertEquals(
-"  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-+ "/test_filecache_dir:/test_filecache_dir,"
-+ "/test_container_work_dir:/test_container_work_dir,"
-+ "/test_container_log_dir:/test_container_log_dir,"
-+ "/test_user_local_dir:/test_user_local_dir",
+"  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
++ "/test_application_local_dir:/test_application_local_dir",
 dockerCommands.get(counter++));
 Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
 Assert.assertEquals("  workdir=/test_container_work_dir",





[33/50] [abbrv] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

2018-02-15 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index b78fc9c..d0ded89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -148,10 +148,8 @@ class DataXceiver extends Receiver implements Runnable {
 (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
 localAddress = peer.getLocalAddressString();
 
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Number of active connections is: "
-  + datanode.getXceiverCount());
-}
+LOG.debug("Number of active connections is: {}",
+datanode.getXceiverCount());
   }
 
   /**
@@ -187,7 +185,7 @@ class DataXceiver extends Receiver implements Runnable {
 // This doesn't need to be in a critical section. Althogh the client
 // can resue the connection to issue a different request, trying sending
 // an OOB through the recently closed block receiver is harmless.
-LOG.info("Sending OOB to peer: " + peer);
+LOG.info("Sending OOB to peer: {}", peer);
 br.sendOOB();
   }
 
@@ -199,7 +197,7 @@ class DataXceiver extends Receiver implements Runnable {
   }
   xceiver.interrupt();
 }
-LOG.info("Stopped the writer: " + peer);
+LOG.info("Stopped the writer: {}", peer);
   }
 
   /**
@@ -239,14 +237,15 @@ class DataXceiver extends Receiver implements Runnable {
   } catch (InvalidMagicNumberException imne) {
 if (imne.isHandshake4Encryption()) {
   LOG.info("Failed to read expected encryption handshake from client " 
+
-  "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
+  "at {}. Perhaps the client " +
   "is running an older version of Hadoop which does not support " +
-  "encryption", imne);
+  "encryption", peer.getRemoteAddressString(), imne);
 } else {
   LOG.info("Failed to read expected SASL data transfer protection " +
-  "handshake from client at " + peer.getRemoteAddressString() + 
+  "handshake from client at {}" +
   ". Perhaps the client is running an older version of Hadoop " +
-  "which does not support SASL data transfer protection", imne);
+  "which does not support SASL data transfer protection",
+  peer.getRemoteAddressString(), imne);
 }
 return;
   }
@@ -302,7 +301,7 @@ class DataXceiver extends Receiver implements Runnable {
 if (LOG.isTraceEnabled()) {
   LOG.trace(s, t);
 } else {
-  LOG.info(s + "; " + t);
+  LOG.info("{}; {}", s, t.toString());
 }
   } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
 String s1 =
@@ -311,23 +310,19 @@ class DataXceiver extends Receiver implements Runnable {
 if (LOG.isTraceEnabled()) {
   LOG.trace(s1, t);
 } else {
-  LOG.info(s1 + "; " + t);  
+  LOG.info("{}; {}", s1, t.toString());
 }
   } else if (t instanceof InvalidToken) {
 // The InvalidToken exception has already been logged in
 // checkAccess() method and this is not a server error.
-if (LOG.isTraceEnabled()) {
-  LOG.trace(s, t);
-}
+LOG.trace(s, t);
   } else {
 LOG.error(s, t);
   }
 } finally {
   collectThreadLocalStates();
-  if (LOG.isDebugEnabled()) {
-LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
-+ datanode.getXceiverCount());
-  }
+  LOG.debug("{}:Number of active connections is: {}",
+  datanode.getDisplayName(), datanode.getXceiverCount());
   updateCurrentThreadName("Cleaning up");
   if (peer != null) {
 dataXceiverServer.closePeer(peer);
@@ -405,21 +400,22 @@ class DataXceiver extends Receiver implements Runnable {
 DomainSocket sock = peer.getDomainSocket();
 sock.sendFileDescriptors(fds, buf, 0, buf.length);
 if (supportsReceiptVerification) {
-  LOG.trace("Reading receipt verification byte for " + slotId);
+  LOG.trace("Reading receipt verification byte for {}", slotId);
   int val = sock.getInputStream().read();
   if (val < 0) {
 throw new EOFException();
   }
 } else {
-  LOG.trace("Receipt verification is not enabled on the DataNode.  " +
-"Not verifying " + slotId);
+ 

[32/50] [abbrv] hadoop git commit: YARN-7789. Should fail RM if 3rd resource type is configured but RM uses DefaultResourceCalculator. (Zian Chen via wangda)

2018-02-15 Thread aengineer
YARN-7789. Should fail RM if 3rd resource type is configured but RM uses DefaultResourceCalculator. (Zian Chen via wangda)

Change-Id: I1f6a451fe16758def6f47c046a9b8a67ad7255af


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/042ef2fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/042ef2fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/042ef2fa

Branch: refs/heads/HDFS-7240
Commit: 042ef2fa7bcc22e3ca4eb8205c34d83e594bc7de
Parents: 60971b8
Author: Wangda Tan 
Authored: Wed Feb 14 23:11:10 2018 +0800
Committer: Wangda Tan 
Committed: Wed Feb 14 23:11:10 2018 +0800

--
 .../scheduler/capacity/CapacityScheduler.java   |  8 +
 ...CapacitySchedulerWithMultiResourceTypes.java | 37 
 2 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/042ef2fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d3aa5cb..cd9d1373 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -338,6 +338,14 @@ public class CapacityScheduler extends
   this.minimumAllocation = super.getMinimumAllocation();
   initMaximumResourceCapability(super.getMaximumAllocation());
   this.calculator = this.conf.getResourceCalculator();
+  if (this.calculator instanceof DefaultResourceCalculator
+  && ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
+throw new YarnRuntimeException("RM uses DefaultResourceCalculator which"
++ " used only memory as resource-type but invalid resource-types"
++ " specified " + ResourceUtils.getResourceTypes() + ". Use"
++ " DomainantResourceCalculator instead to make effective use of"
++ " these resource-types");
+  }
   this.usePortForNodeName = this.conf.getUsePortForNodeName();
   this.applications = new ConcurrentHashMap<>();
   this.labelManager = rmContext.getNodeLabelManager();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042ef2fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
index 2da2cdd..ea29f7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -38,6 +39,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
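
The new check means a cluster that defines resource types beyond memory and vcores must also switch the CapacityScheduler to DominantResourceCalculator. A minimal sketch of the corresponding setting; the property name is the usual capacity-scheduler key and is an assumption here, since it is not part of this diff:

  import org.apache.hadoop.conf.Configuration;

  public class ResourceCalculatorConfigSketch {
    public static Configuration example() {
      Configuration conf = new Configuration();
      // Normally set in capacity-scheduler.xml rather than in code.
      conf.set("yarn.scheduler.capacity.resource-calculator",
          "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
      return conf;
    }
  }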

[25/50] [abbrv] hadoop git commit: YARN-7914. Fix exit code handling for short lived Docker containers. Contributed by Shane Kumpf

2018-02-15 Thread aengineer
YARN-7914. Fix exit code handling for short lived Docker containers. 
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a1db60a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a1db60a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a1db60a

Branch: refs/heads/HDFS-7240
Commit: 5a1db60ab1e8b28cd73367c69970513de88cf4dd
Parents: 87e2570
Author: Jason Lowe 
Authored: Mon Feb 12 15:50:10 2018 -0600
Committer: Jason Lowe 
Committed: Mon Feb 12 15:50:10 2018 -0600

--
 .../impl/container-executor.c   | 92 ++--
 1 file changed, 46 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a1db60a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 5ce6a00..035c694 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1577,58 +1577,58 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   sleep(1);
 }
 #endif
+  }
 
-sprintf(docker_inspect_exitcode_command,
-  "%s inspect --format {{.State.ExitCode}} %s",
-docker_binary, container_id);
-fprintf(LOGFILE, "Obtaining the exit code...\n");
-fprintf(LOGFILE, "Docker inspect command: %s\n", 
docker_inspect_exitcode_command);
-FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, 
"r");
-if(inspect_exitcode_docker == NULL) {
-  fprintf(ERRORFILE, "Done with inspect_exitcode, inspect_exitcode_docker 
is null\n");
-  fflush(ERRORFILE);
-  exit_code = -1;
-  goto cleanup;
-}
-res = fscanf (inspect_exitcode_docker, "%d", _code);
-if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
-fprintf (ERRORFILE,
- "Could not inspect docker to get exitcode:  %s.\n", 
docker_inspect_exitcode_command);
-  fflush(ERRORFILE);
-  exit_code = -1;
-  goto cleanup;
-}
-fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
-if(exit_code != 0) {
-  fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
-  exit_code);
-  snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
-docker_binary, container_id);
-  FILE* logs = popen(docker_logs_command, "r");
-  if(logs != NULL) {
-clearerr(logs);
-res = fread(buffer, BUFFER_SIZE, 1, logs);
-if(res < 1) {
-  fprintf(ERRORFILE, "%s %d %d\n",
-"Unable to read from docker logs(ferror, feof):", ferror(logs), 
feof(logs));
-  fflush(ERRORFILE);
-}
-else {
-  fprintf(ERRORFILE, "%s\n", buffer);
-  fflush(ERRORFILE);
-}
-  }
-  else {
-fprintf(ERRORFILE, "%s\n", "Failed to get output of docker logs");
-fprintf(ERRORFILE, "Command was '%s'\n", docker_logs_command);
-fprintf(ERRORFILE, "%s\n", strerror(errno));
+  sprintf(docker_inspect_exitcode_command,
+"%s inspect --format {{.State.ExitCode}} %s",
+  docker_binary, container_id);
+  fprintf(LOGFILE, "Obtaining the exit code...\n");
+  fprintf(LOGFILE, "Docker inspect command: %s\n", 
docker_inspect_exitcode_command);
+  FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, "r");
+  if(inspect_exitcode_docker == NULL) {
+fprintf(ERRORFILE, "Done with inspect_exitcode, inspect_exitcode_docker is 
null\n");
+fflush(ERRORFILE);
+exit_code = -1;
+goto cleanup;
+  }
+  res = fscanf (inspect_exitcode_docker, "%d", _code);
+  if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
+  fprintf (ERRORFILE,
+   "Could not inspect docker to get exitcode:  %s.\n", 
docker_inspect_exitcode_command);
+fflush(ERRORFILE);
+exit_code = -1;
+goto cleanup;
+  }
+  fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
+  if(exit_code != 0) {
+fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
+exit_code);
+snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
+  docker_binary, container_id);
+FILE* logs = 

[20/50] [abbrv] hadoop git commit: HADOOP-15187. Remove ADL mock test dependency on REST call invoked from Java SDK. Contributed by Vishwajeet Dusane.

2018-02-15 Thread aengineer
HADOOP-15187. Remove ADL mock test dependency on REST call invoked from Java SDK.
Contributed by Vishwajeet Dusane.

(cherry picked from commit bd5ab5912564d2d687651b01f552b8e4ca8c145a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cf88fcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cf88fcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cf88fcd

Branch: refs/heads/HDFS-7240
Commit: 8cf88fcd1f63d3d4e9736b1b687a4f4e663f6125
Parents: d02e42c
Author: Steve Loughran 
Authored: Mon Feb 12 15:13:00 2018 +
Committer: Steve Loughran 
Committed: Mon Feb 12 15:13:00 2018 +

--
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  | 102 ---
 .../apache/hadoop/fs/adl/TestACLFeatures.java   | 262 
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   | 196 
 .../adl/TestConcurrentDataReadOperations.java   | 299 ---
 .../hadoop/fs/adl/TestCustomTokenProvider.java  | 140 -
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 102 ---
 .../apache/hadoop/fs/adl/TestListStatus.java| 137 -
 .../hadoop/fs/adl/TestableAdlFileSystem.java|  30 --
 .../hadoop/fs/adl/common/ExpectedResponse.java  |  71 -
 .../hadoop/fs/adl/common/TestDataForRead.java   | 122 
 10 files changed, 1461 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
deleted file mode 100644
index d843d55..000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.adl;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider;
-import org.apache.hadoop.fs.adl.oauth2.AzureADTokenProvider;
-import static org.apache.hadoop.fs.adl.AdlConfKeys
-.AZURE_AD_TOKEN_PROVIDER_CLASS_KEY;
-import static org.apache.hadoop.fs.adl.AdlConfKeys
-.AZURE_AD_TOKEN_PROVIDER_TYPE_KEY;
-
-import com.squareup.okhttp.mockwebserver.MockWebServer;
-
-import org.junit.After;
-import org.junit.Before;
-
-/**
- * Mock server to simulate Adls backend calls. This infrastructure is 
expandable
- * to override expected server response based on the derived test 
functionality.
- * Common functionality to generate token information before request is send to
- * adls backend is also managed within AdlMockWebServer implementation using
- * {@link org.apache.hadoop.fs.adl.common.CustomMockTokenProvider}.
- */
-public class AdlMockWebServer {
-  // Create a MockWebServer. These are lean enough that you can create a new
-  // instance for every unit test.
-  private MockWebServer server = null;
-  private TestableAdlFileSystem fs = null;
-  private int port = 0;
-  private Configuration conf = new Configuration();
-
-  public MockWebServer getMockServer() {
-return server;
-  }
-
-  public TestableAdlFileSystem getMockAdlFileSystem() {
-return fs;
-  }
-
-  public int getPort() {
-return port;
-  }
-
-  public Configuration getConf() {
-return conf;
-  }
-
-  public void setConf(Configuration conf) {
-this.conf = conf;
-  }
-
-  @Before
-  public void preTestSetup() throws IOException, URISyntaxException {
-server = new MockWebServer();
-
-// Start the server.
-server.start();
-
-// Ask the server for its URL. You'll need this to make HTTP requests.
-URL baseUrl = server.getUrl("");
-port = baseUrl.getPort();
-
-// 

[27/50] [abbrv] hadoop git commit: Revert "HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

2018-02-15 Thread aengineer
Revert "HADOOP-15195. With SELinux enabled, directories mounted with 
start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

This reverts commit 5b88cb339898f82519223bcd07e1caedff02d051.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cc6d1df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cc6d1df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cc6d1df

Branch: refs/heads/HDFS-7240
Commit: 9cc6d1dfb351f505aaa8f9f028068650b3b00d0d
Parents: 5b88cb3
Author: Chris Douglas 
Authored: Mon Feb 12 21:06:10 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:06:10 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 ---
 start-build-env.sh  |  32 +-
 2 files changed, 3 insertions(+), 131 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cc6d1df/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
deleted file mode 100644
index 0c32bcf..000
--- a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-# Mock docker command
-docker () {
-  if [ "$1" = "-v" ]; then
-shift
-echo Docker version ${DCKR_MOCK_VER:?}
-  elif [ "$1" = run ]; then
-shift
-until [ $# -eq 0 ]; do
-  if [ "$1" = -v ]; then
-shift
-echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
-  printf "Mounted %s with %s option.\n", $1, $3
-  else if (NF == 2)
-  printf "Mounted %s without %s option.\n", $1, "z"}'
-  fi
-  shift
-done
-  fi
-}
-export -f docker
-export DCKR_MOCK_VER
-
-# Mock a SELinux enabled system
-enable_selinux () {
-  mkdir -p "${TMP}/bin"
-  echo true >"${TMP}/bin"/selinuxenabled
-  chmod a+x "${TMP}/bin"/selinuxenabled
-  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
-PATH="${TMP}/bin":"$PATH"
-  fi
-}
-
-setup_user () {
-  if [ -z "$(printenv USER)" ]; then
-if [ -z "$USER" ]; then
-  USER=${HOME##*/}
-fi
-export USER
-  fi
-}
-
-# Mock stat command as used in start-build-env.sh
-stat () {
-  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
-printf 'mock_u:mock_r:mock_t:s0'
-  else
-command stat "$@"
-  fi
-}
-export -f stat
-
-# Verify that host directories get mounted without z option
-# and INFO messages get printed out
-@test "start-build-env.sh (Docker without z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.4
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
-  [[ ${lines[1]} =~ \
- "Mounted ".*" may not be accessible to the container." ]]
-  [[ ${lines[2]} == \
- "INFO: If so, on the host, run the following command:" ]]
-  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
-  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
-}
-
-# Verify that host directories get mounted with z option
-@test "start-build-env.sh (Docker with z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.7
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cc6d1df/start-build-env.sh
--

[39/50] [abbrv] hadoop git commit: HDFS-13142. Define and Implement a DiffList Interface to store and manage SnapshotDiffs. Contributed by Shashikant Banerjee

2018-02-15 Thread aengineer
HDFS-13142. Define and Implement a DiffList Interface to store and manage 
SnapshotDiffs.  Contributed by Shashikant Banerjee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ea7d78c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ea7d78c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ea7d78c

Branch: refs/heads/HDFS-7240
Commit: 6ea7d78ccb0d1c4af9bcac02a4cff89bd252
Parents: 8f66aff
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Feb 15 19:33:44 2018 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Feb 15 19:33:44 2018 +0800

--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   3 +-
 .../snapshot/AbstractINodeDiffList.java |  27 ++--
 .../hdfs/server/namenode/snapshot/DiffList.java | 140 +++
 .../namenode/snapshot/DiffListByArrayList.java  |  80 +++
 .../snapshot/DirectoryWithSnapshotFeature.java  |  10 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   4 +-
 .../server/namenode/snapshot/FileDiffList.java  |  11 +-
 .../snapshot/FileWithSnapshotFeature.java   |   2 +-
 .../snapshot/SnapshotFSImageFormat.java |   4 +-
 .../namenode/TestFSImageWithSnapshot.java   |   3 +-
 .../snapshot/TestRenameWithSnapshots.java   |  40 +++---
 .../snapshot/TestSetQuotaWithSnapshot.java  |   3 +-
 .../namenode/snapshot/TestSnapshotRename.java   |   3 +-
 13 files changed, 276 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 90659f3..6693297 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -52,6 +52,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.StringUtils;
 import static 
org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
@@ -988,7 +989,7 @@ public class INodeFile extends INodeWithAdditionalFields
 } else {
   // Collect all distinct blocks
   Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
-  List<FileDiff> diffs = sf.getDiffs().asList();
+  DiffList<FileDiff> diffs = sf.getDiffs().asList();
   for(FileDiff diff : diffs) {
 BlockInfo[] diffBlocks = diff.getBlocks();
 if (diffBlocks != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 98d8c53..8f2465a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -36,14 +35,15 @@ abstract class AbstractINodeDiffList> 
 implements Iterable<D> {
+
   /** Diff list sorted by snapshot IDs, i.e. in chronological order.
 * Created lazily to avoid wasting memory by empty lists. */
-  private List<D> diffs;
+  private DiffList<D> diffs;
 
   /** @return this list as a unmodifiable {@link List}. */
-  public final List<D> asList() {
+  public final DiffList<D> asList() {
 return diffs != null ?
-Collections.unmodifiableList(diffs) : Collections.emptyList();
+DiffList.unmodifiableList(diffs) : DiffList.emptyList();
   }
   
   /** Clear the list. */
@@ -72,7 +72,7 @@ abstract class 

[06/50] [abbrv] hadoop git commit: HADOOP-15214. Make Hadoop compatible with Guava 21.0. Contributed by Igor Dvorzhak

2018-02-15 Thread aengineer
HADOOP-15214. Make Hadoop compatible with Guava 21.0.
Contributed by Igor Dvorzhak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/996796f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/996796f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/996796f1

Branch: refs/heads/HDFS-7240
Commit: 996796f1048369e0f307f935ba01af64cc751a85
Parents: 8faf0b5
Author: Steve Loughran 
Authored: Thu Feb 8 10:55:54 2018 -0800
Committer: Steve Loughran 
Committed: Thu Feb 8 10:55:54 2018 -0800

--
 .../src/main/java/org/apache/hadoop/util/RunJar.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java   | 4 ++--
 .../apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java  | 3 +--
 3 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 0ae9e47..9dd770c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -38,12 +38,12 @@ import java.util.jar.JarInputStream;
 import java.util.jar.Manifest;
 import java.util.regex.Pattern;
 
-import com.google.common.io.NullOutputStream;
 import org.apache.commons.io.input.TeeInputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IOUtils.NullOutputStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index dfc6872..b6b42544 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.crypto.key.kms.server;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -32,6 +31,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import 
org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
+import org.apache.hadoop.util.StopWatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -555,7 +555,7 @@ public class KMS {
   throws Exception {
 LOG.trace("Entering reencryptEncryptedKeys method.");
 try {
-  final Stopwatch sw = new Stopwatch().start();
+  final StopWatch sw = new StopWatch().start();
   checkNotEmpty(name, "name");
   checkNotNull(jsonPayload, "jsonPayload");
   final UserGroupInformation user = HttpUserGroupInformation.get();

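The swap above follows a simple pattern: Guava 21 removed public access to the `Stopwatch` constructor, while `org.apache.hadoop.util.StopWatch` offers an equivalent, constructible timer. A minimal sketch of the replacement, assuming `StopWatch.now(TimeUnit)` as in Hadoop's utility class; the timed work is a placeholder:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchSketch {
  public static void main(String[] args) throws InterruptedException {
    // Hadoop's own StopWatch keeps a public constructor, so this still compiles
    // against Guava 21, where new Stopwatch() is no longer accessible.
    StopWatch sw = new StopWatch().start();
    Thread.sleep(100);  // placeholder for the timed work
    long elapsedMs = sw.now(TimeUnit.MILLISECONDS);
    sw.stop();
    System.out.println("operation took " + elapsedMs + " ms");
  }
}
```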
http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index 01c2038..65de397 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import 

[18/50] [abbrv] hadoop git commit: YARN-7697. NM goes down with OOM due to leak in log-aggregation. (Xuan Gong via wangda)

2018-02-15 Thread aengineer
YARN-7697. NM goes down with OOM due to leak in log-aggregation. (Xuan Gong via 
wangda)

Change-Id: Ie4fc7979d834e25f37a033c314f3efceeb8f4a9e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4c98579
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4c98579
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4c98579

Branch: refs/heads/HDFS-7240
Commit: d4c98579e36df7eeb788352d7b76cd2c7448c511
Parents: 789a185
Author: Wangda Tan 
Authored: Mon Feb 12 10:28:35 2018 +0800
Committer: Wangda Tan 
Committed: Mon Feb 12 10:28:35 2018 +0800

--
 .../LogAggregationFileController.java   |  7 +-
 .../ifile/IndexedFileAggregatedLogsBlock.java   |  2 +-
 .../LogAggregationIndexedFileController.java| 69 +---
 .../tfile/LogAggregationTFileController.java|  5 +-
 ...TestLogAggregationFileControllerFactory.java |  5 +-
 .../TestLogAggregationIndexFileController.java  | 21 ++
 6 files changed, 79 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index 0590535..aeef574 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -226,10 +226,12 @@ public abstract class LogAggregationFileController {
* Returns the owner of the application.
*
* @param aggregatedLogPath the aggregatedLog path
+   * @param appId the ApplicationId
* @return the application owner
* @throws IOException if we can not get the application owner
*/
-  public abstract String getApplicationOwner(Path aggregatedLogPath)
+  public abstract String getApplicationOwner(Path aggregatedLogPath,
+  ApplicationId appId)
   throws IOException;
 
   /**
@@ -237,11 +239,12 @@ public abstract class LogAggregationFileController {
* found.
*
* @param aggregatedLogPath the aggregatedLog path.
+   * @param appId the ApplicationId
* @return a map of the Application ACLs.
* @throws IOException if we can not get the application acls
*/
   public abstract Map<ApplicationAccessType, String> getApplicationAcls(
-  Path aggregatedLogPath) throws IOException;
+  Path aggregatedLogPath, ApplicationId appId) throws IOException;
 
   /**
* Verify and create the remote log directory.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
index 6d48d7a..c53ffcc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
@@ -135,7 +135,7 @@ public class IndexedFileAggregatedLogsBlock extends 
LogAggregationHtmlBlock {
 IndexedLogsMeta indexedLogsMeta = null;
 try {
   indexedLogsMeta = fileController.loadIndexedLogsMeta(
-  thisNodeFile.getPath(), endIndex);
+  thisNodeFile.getPath(), endIndex, appId);
 } catch (Exception ex) {
   // DO NOTHING
   LOG.warn("Can not load log meta from the log file:"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java

[40/50] [abbrv] hadoop git commit: HADOOP-15090. Add ADL troubleshooting doc. Contributed by Steve Loughran.

2018-02-15 Thread aengineer
HADOOP-15090. Add ADL troubleshooting doc.
Contributed by Steve Loughran.

(cherry picked from commit 58a2120e8a31307f19551f87be4e81d4fb626de1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9a373fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9a373fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9a373fb

Branch: refs/heads/HDFS-7240
Commit: c9a373fb14bbf826324c2547397f82b73bd466f4
Parents: 6ea7d78
Author: Steve Loughran 
Authored: Thu Feb 15 14:26:00 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:26:00 2018 +

--
 .../src/site/markdown/index.md  |   4 +
 .../src/site/markdown/troubleshooting_adl.md| 146 +++
 2 files changed, 150 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a373fb/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ca79321..d2b6edf 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -22,6 +22,10 @@ The `hadoop-azure-datalake` module provides support for 
integration with the
 [Azure Data Lake 
Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
+### Related Documents
+
+* [Troubleshooting](troubleshooting_adl.html).
+
 ## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a373fb/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
new file mode 100644
index 000..80b2a6f
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
@@ -0,0 +1,146 @@
+
+
+# Troubleshooting ADL
+
+
+
+
+## Error messages
+
+
+### Error fetching access token:
+
+You aren't authenticated.
+
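A common cause is missing or incorrect OAuth2 credentials in the cluster configuration. A minimal sketch of the client-credential settings, with property names as documented for the connector; the tenant id, client id, and secret below are placeholders:

```java
import org.apache.hadoop.conf.Configuration;

public class AdlOAuthConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Client-credential OAuth settings; TENANT_ID, CLIENT_ID and CLIENT_SECRET
    // stand in for the values issued by Azure AD.
    conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
    conf.set("fs.adl.oauth2.refresh.url",
        "https://login.microsoftonline.com/TENANT_ID/oauth2/token");
    conf.set("fs.adl.oauth2.client.id", "CLIENT_ID");
    conf.set("fs.adl.oauth2.credential", "CLIENT_SECRET");
  }
}
```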
+### Error fetching access token:  JsonParseException
+
+This means a problem talking to the oauth endpoint.
+
+
+```
+Operation null failed with exception 
com.fasterxml.jackson.core.JsonParseException : Unexpected character ('<' (code 
60)): expected a valid value (number, String, array, object, 'true', 'false' or 
'null')
+  at [Source: 
sun.net.www.protocol.http.HttpURLConnection$HttpInputStream@211d30ed; line: 3, 
column: 2]
+  Last encountered exception thrown after 5 tries. 
[com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException]
+  [ServerRequestId:null]
+  at 
com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1147)
+  at 
com.microsoft.azure.datalake.store.ADLStoreClient.getDirectoryEntry(ADLStoreClient.java:725)
+  at 
org.apache.hadoop.fs.adl.AdlFileSystem.getFileStatus(AdlFileSystem.java:476)
+  at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1713)
+  at 
org.apache.hadoop.fs.contract.ContractTestUtils.rm(ContractTestUtils.java:397)
+  at 
org.apache.hadoop.fs.contract.ContractTestUtils.cleanup(ContractTestUtils.java:374)
+  at 
org.apache.hadoop.fs.contract.AbstractFSContractTestBase.deleteTestDirInTeardown(AbstractFSContractTestBase.java:213)
+  at 
org.apache.hadoop.fs.contract.AbstractFSContractTestBase.teardown(AbstractFSContractTestBase.java:204)
+  at 
org.apache.hadoop.fs.contract.AbstractContractOpenTest.teardown(AbstractContractOpenTest.java:64)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at 

[11/50] [abbrv] hadoop git commit: YARN-7909. Add charset to YARN Service REST API. (Contributed by Eric Yang)

2018-02-15 Thread aengineer
YARN-7909. Add charset to YARN Service REST API. (Contributed by Eric Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c97d5bce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c97d5bce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c97d5bce

Branch: refs/heads/HDFS-7240
Commit: c97d5bceb2305e02f9e8b6c2c10a2aba7fdc652b
Parents: 543f3ab
Author: Eric Yang 
Authored: Fri Feb 9 20:01:25 2018 -0500
Committer: Eric Yang 
Committed: Fri Feb 9 20:01:25 2018 -0500

--
 .../apache/hadoop/yarn/service/webapp/ApiServer.java   | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97d5bce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 16f8513..e58938e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -94,7 +94,7 @@ public class ApiServer {
   @GET
   @Path(VERSION)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response getVersion() {
 String version = VersionInfo.getBuildVersion();
 LOG.info(version);
@@ -104,7 +104,7 @@ public class ApiServer {
   @POST
   @Path(SERVICE_ROOT_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response createService(@Context HttpServletRequest request,
   Service service) {
 ServiceStatus serviceStatus = new ServiceStatus();
@@ -167,7 +167,7 @@ public class ApiServer {
   @GET
   @Path(SERVICE_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response getService(@Context HttpServletRequest request,
   @PathParam(SERVICE_NAME) String appName) {
 ServiceStatus serviceStatus = new ServiceStatus();
@@ -210,7 +210,7 @@ public class ApiServer {
   @DELETE
   @Path(SERVICE_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response deleteService(@Context HttpServletRequest request,
   @PathParam(SERVICE_NAME) String appName) {
 try {
@@ -273,7 +273,8 @@ public class ApiServer {
   @PUT
   @Path(COMPONENT_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN  })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8",
+  MediaType.TEXT_PLAIN  })
   public Response updateComponent(@Context HttpServletRequest request,
   @PathParam(SERVICE_NAME) String appName,
   @PathParam(COMPONENT_NAME) String componentName, Component component) {
@@ -322,7 +323,7 @@ public class ApiServer {
   @PUT
   @Path(SERVICE_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response updateService(@Context HttpServletRequest request,
   @PathParam(SERVICE_NAME) String appName,
   Service updateServiceData) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: xattr api cleanup

2018-02-15 Thread aengineer
xattr api cleanup


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da59acd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da59acd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da59acd8

Branch: refs/heads/HDFS-7240
Commit: da59acd8ca9ab5b49b988ffca64e8cce91c5f741
Parents: 481d79f
Author: Kihwal Lee 
Authored: Thu Feb 15 11:11:55 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:11:55 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da59acd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3223467..be3092c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -137,8 +137,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da59acd8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 43eeadf..b5f7573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -843,28 +843,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1109,20 +1118,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-assertTrue(xattrNames.size() == 0);
+try {
+  // non-raw path
+  userFs.listXAttrs(path);
+  fail("listXAttr should have 

[30/50] [abbrv] hadoop git commit: HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar.

2018-02-15 Thread aengineer
HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. 
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332269de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332269de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332269de

Branch: refs/heads/HDFS-7240
Commit: 332269de065d0f40eb54ee5e53b765217c24081e
Parents: c5e6e3d
Author: Arpit Agarwal 
Authored: Tue Feb 13 10:14:16 2018 -0800
Committer: Arpit Agarwal 
Committed: Tue Feb 13 10:14:16 2018 -0800

--
 .../client/KerberosAuthenticator.java   | 80 +---
 .../client/TestKerberosAuthenticator.java   | 29 +++
 2 files changed, 82 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332269de/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 942d13c..64d4330 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -13,6 +13,8 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import com.google.common.annotations.VisibleForTesting;
+import java.lang.reflect.Constructor;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
@@ -177,41 +179,65 @@ public class KerberosAuthenticator implements 
Authenticator {
*/
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
-throws IOException, AuthenticationException {
+  throws IOException, AuthenticationException {
 if (!token.isSet()) {
   this.url = url;
   base64 = new Base64(0);
-  HttpURLConnection conn = token.openConnection(url, connConfigurator);
-  conn.setRequestMethod(AUTH_HTTP_METHOD);
-  conn.connect();
-  
-  boolean needFallback = false;
-  if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-LOG.debug("JDK performed authentication on our behalf.");
-// If the JDK already did the SPNEGO back-and-forth for
-// us, just pull out the token.
-AuthenticatedURL.extractToken(conn, token);
-if (isTokenKerberos(token)) {
-  return;
+  try {
+HttpURLConnection conn = token.openConnection(url, connConfigurator);
+conn.setRequestMethod(AUTH_HTTP_METHOD);
+conn.connect();
+
+boolean needFallback = false;
+if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+  LOG.debug("JDK performed authentication on our behalf.");
+  // If the JDK already did the SPNEGO back-and-forth for
+  // us, just pull out the token.
+  AuthenticatedURL.extractToken(conn, token);
+  if (isTokenKerberos(token)) {
+return;
+  }
+  needFallback = true;
 }
-needFallback = true;
-  }
-  if (!needFallback && isNegotiate(conn)) {
-LOG.debug("Performing our own SPNEGO sequence.");
-doSpnegoSequence(token);
-  } else {
-LOG.debug("Using fallback authenticator sequence.");
-Authenticator auth = getFallBackAuthenticator();
-// Make sure that the fall back authenticator have the same
-// ConnectionConfigurator, since the method might be overridden.
-// Otherwise the fall back authenticator might not have the information
-// to make the connection (e.g., SSL certificates)
-auth.setConnectionConfigurator(connConfigurator);
-auth.authenticate(url, token);
+if (!needFallback && isNegotiate(conn)) {
+  LOG.debug("Performing our own SPNEGO sequence.");
+  doSpnegoSequence(token);
+} else {
+  LOG.debug("Using fallback authenticator sequence.");
+  Authenticator auth = getFallBackAuthenticator();
+  // Make sure that the fall back authenticator have the same
+  // ConnectionConfigurator, since the method might be overridden.
+  // Otherwise the fall back authenticator might not have the
+  // information to make the connection (e.g., SSL certificates)
+  

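For reference, the code path instrumented here is the one driven by `AuthenticatedURL`; with this change, an IO failure raised during the SPNEGO negotiation is intended to be rethrown with the target URL in its message. A rough sketch of a caller, where the host name and WebHDFS path are placeholders:

```java
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class SpnegoClientSketch {
  public static void main(String[] args) throws Exception {
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    URL url = new URL("http://namenode.example.com:50070/webhdfs/v1/?op=GETFILESTATUS");
    // AuthenticatedURL delegates to KerberosAuthenticator.authenticate(url, token);
    // after this patch an authentication IO failure should name the URL above.
    HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
```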
[31/50] [abbrv] hadoop git commit: HADOOP-15040 Upgrade AWS SDK to 1.11.271: NPE bug spams logs w/ Yarn Log Aggregation

2018-02-15 Thread aengineer
HADOOP-15040 Upgrade AWS SDK to 1.11.271: NPE bug spams logs w/ Yarn Log 
Aggregation


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60971b81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60971b81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60971b81

Branch: refs/heads/HDFS-7240
Commit: 60971b8195c954c109e83cdbd1c94c700da4a271
Parents: 332269d
Author: Aaron Fabbri 
Authored: Tue Feb 13 18:38:22 2018 -0800
Committer: Aaron Fabbri 
Committed: Tue Feb 13 18:51:22 2018 -0800

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60971b81/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index dd8465a..c27596c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -137,7 +137,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
-1.11.199
+1.11.271
 2.3.4
 1.5
 

[12/50] [abbrv] hadoop git commit: Preparing for 3.2.0 development

2018-02-15 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 1fafe77..a0e530a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,11 +20,11 @@
   
 hadoop-yarn
 org.apache.hadoop
-3.1.0-SNAPSHOT
+3.2.0-SNAPSHOT
   
   4.0.0
   hadoop-yarn-ui
-  3.1.0-SNAPSHOT
+  3.2.0-SNAPSHOT
   Apache Hadoop YARN UI
   ${packagingType}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/pom.xml
index e4e611b..6110476 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -16,11 +16,11 @@
   
 org.apache.hadoop
 hadoop-project
-3.1.0-SNAPSHOT
+3.2.0-SNAPSHOT
 ../../hadoop-project
   
   hadoop-yarn
-  3.1.0-SNAPSHOT
+  3.2.0-SNAPSHOT
   pom
   Apache Hadoop YARN
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/pom.xml
--
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 3ef9c45..4593441 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -18,11 +18,11 @@
   
 org.apache.hadoop
 hadoop-project
-3.1.0-SNAPSHOT
+3.2.0-SNAPSHOT
 ../hadoop-project
   
   hadoop-yarn-project
-  3.1.0-SNAPSHOT
+  3.2.0-SNAPSHOT
   pom
   Apache Hadoop YARN Project
   http://hadoop.apache.org/yarn/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/pom.xml
--
diff --git a/pom.xml b/pom.xml
index d776678..a51c42a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
   4.0.0
   org.apache.hadoop
   hadoop-main
-  3.1.0-SNAPSHOT
+  3.2.0-SNAPSHOT
   Apache Hadoop Main
   Apache Hadoop Main
   pom


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[37/50] [abbrv] hadoop git commit: Revert "YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for each queue"

2018-02-15 Thread aengineer
Revert "YARN-7813: Capacity Scheduler Intra-queue Preemption should be 
configurable for each queue"

This reverts commit c5e6e3de1c31eda052f89eddd7bba288625936b9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddfe42e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddfe42e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddfe42e

Branch: refs/heads/HDFS-7240
Commit: bddfe42e2ccda2e6cc9d149461640ba36eca5922
Parents: 1f20f43
Author: Eric Payne 
Authored: Wed Feb 14 14:29:20 2018 -0600
Committer: Eric Payne 
Committed: Wed Feb 14 14:29:20 2018 -0600

--
 .../hadoop/yarn/api/records/QueueInfo.java  | 35 ---
 .../src/main/proto/yarn_protos.proto|  1 -
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  6 --
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 96 ++--
 .../api/records/impl/pb/QueueInfoPBImpl.java| 13 ---
 .../hadoop/yarn/api/TestPBImplRecords.java  |  2 +-
 .../capacity/IntraQueueCandidatesSelector.java  |  4 +-
 .../scheduler/capacity/AbstractCSQueue.java | 72 ++-
 .../scheduler/capacity/CSQueue.java | 16 +---
 .../CapacitySchedulerConfiguration.java | 15 ---
 .../webapp/CapacitySchedulerPage.java   |  5 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java |  6 --
 .../TestConfigurationMutationACLPolicies.java   |  2 +-
 .../TestSchedulerApplicationAttempt.java|  2 +-
 .../scheduler/capacity/TestLeafQueue.java   |  2 +-
 .../src/site/markdown/CapacityScheduler.md  |  3 +-
 17 files changed, 26 insertions(+), 257 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
index 57ea9bf..897b442 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -94,26 +94,6 @@ public abstract class QueueInfo {
 return queueInfo;
   }
 
-  @Private
-  @Unstable
-  public static QueueInfo newInstance(String queueName, float capacity,
-  float maximumCapacity, float currentCapacity,
-  List childQueues, List applications,
-  QueueState queueState, Set accessibleNodeLabels,
-  String defaultNodeLabelExpression, QueueStatistics queueStatistics,
-  boolean preemptionDisabled,
-  Map queueConfigurations,
-  boolean intraQueuePreemptionDisabled) {
-QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity,
-maximumCapacity, currentCapacity,
-childQueues, applications,
-queueState, accessibleNodeLabels,
-defaultNodeLabelExpression, queueStatistics,
-preemptionDisabled, queueConfigurations);
-queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
-return queueInfo;
-  }
-
   /**
* Get the name of the queue.
* @return name of the queue
@@ -281,19 +261,4 @@ public abstract class QueueInfo {
   @Unstable
   public abstract void setQueueConfigurations(
   Map queueConfigurations);
-
-
-  /**
-   * Get the intra-queue preemption status of the queue.
-   * @return if property is not in proto, return null;
-   *otherwise, return intra-queue preemption status of the queue
-   */
-  @Public
-  @Stable
-  public abstract Boolean getIntraQueuePreemptionDisabled();
-
-  @Private
-  @Unstable
-  public abstract void setIntraQueuePreemptionDisabled(
-  boolean intraQueuePreemptionDisabled);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index b978761..25c8569 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -569,7 +569,6 @@ message QueueInfoProto {
   optional QueueStatisticsProto queueStatistics = 10;
   optional bool 

[28/50] [abbrv] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

2018-02-15 Thread aengineer
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh 
may not be accessible. Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c5d7d71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c5d7d71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c5d7d71

Branch: refs/heads/HDFS-7240
Commit: 0c5d7d71a80bccd4ad7eab269d0727b999606a7e
Parents: 9cc6d1d
Author: Chris Douglas 
Authored: Mon Feb 12 21:07:15 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 12 21:07:15 2018 -0800

--
 .../src/test/scripts/start-build-env.bats   | 102 +++
 start-build-env.sh  |  32 +-
 2 files changed, 131 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c5d7d71/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 000..dbb14ad
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+shift
+echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+shift
+until [ $# -eq 0 ]; do
+  if [ "$1" = -v ]; then
+shift
+echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+  printf "Mounted %s with z option.\n", $1
+  else if (NF == 2)
+  printf "Mounted %s without z option.\n", $1}'
+  fi
+  shift
+done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+if [ -z "$USER" ]; then
+  USER=${HOME##*/}
+fi
+export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+printf 'mock_u:mock_r:mock_t:s0'
+  else
+command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux is enabled." ]]
+  [[ ${lines[1]} =~ \
+ "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+ "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c5d7d71/start-build-env.sh
--
diff --git a/start-build-env.sh b/start-build-env.sh
index 5a18151..4da55af 100755
--- 

hadoop git commit: YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 41708402a -> 6f64530fc


YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f64530f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f64530f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f64530f

Branch: refs/heads/branch-3.1
Commit: 6f64530fc35bbc954a84d64f349379dc15fe0898
Parents: 4170840
Author: Jason Lowe 
Authored: Thu Feb 15 17:09:00 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 17:14:39 2018 -0600

--
 .../java/org/apache/hadoop/yarn/util/Apps.java  |  22 ++-
 .../yarn/util/AuxiliaryServiceHelper.java   |   2 +-
 .../server/nodemanager/ContainerExecutor.java   |  62 ++---
 .../nodemanager/LinuxContainerExecutor.java |   8 --
 .../launcher/ContainerLaunch.java   |  88 
 .../runtime/DefaultLinuxContainerRuntime.java   |   6 -
 .../DelegatingLinuxContainerRuntime.java|  11 --
 .../runtime/DockerLinuxContainerRuntime.java|   7 -
 .../runtime/ContainerRuntime.java   |  11 --
 .../launcher/TestContainerLaunch.java   | 133 +--
 10 files changed, 240 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f64530f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
index 685c6d3..1c90d55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.regex.Matcher;
@@ -105,7 +106,26 @@ public class Apps {
   }
 }
   }
-  
+
+  /**
+   *
+   * @param envString String containing env variable definitions
+   * @param classPathSeparator String that separates the definitions
+   * @return ArrayList of environment variable names
+   */
+  public static ArrayList<String> getEnvVarsFromInputString(String envString,
+  String classPathSeparator) {
+ArrayList<String> envList = new ArrayList<>();
+if (envString != null && envString.length() > 0) {
+  Matcher varValMatcher = VARVAL_SPLITTER.matcher(envString);
+  while (varValMatcher.find()) {
+String envVar = varValMatcher.group(1);
+envList.add(envVar);
+  }
+}
+return envList;
+  }
+
   /**
* This older version of this method is kept around for compatibility
* because downstream frameworks like Spark and Tez have been using it.

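A quick illustration of the new helper above (not part of the patch): given an environment string in the NodeManager's `VAR=value,VAR=value` whitelist format, it returns just the variable names. The input string below is made up.

```java
import java.util.ArrayList;
import org.apache.hadoop.yarn.util.Apps;

public class EnvVarNamesSketch {
  public static void main(String[] args) {
    // Hypothetical whitelist string in the usual VAR=value,VAR=value form.
    ArrayList<String> names = Apps.getEnvVarsFromInputString(
        "JAVA_HOME=/usr/java/default,HADOOP_CONF_DIR=/etc/hadoop", ",");
    System.out.println(names);  // expected: [JAVA_HOME, HADOOP_CONF_DIR]
  }
}
```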
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f64530f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
index cb118f5..1374d96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
@@ -45,7 +45,7 @@ public class AuxiliaryServiceHelper {
 Base64.encodeBase64String(byteData));
   }
 
-  private static String getPrefixServiceName(String serviceName) {
+  public static String getPrefixServiceName(String serviceName) {
 return NM_AUX_SERVICE + serviceName;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f64530f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 

hadoop git commit: YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0b489e564 -> 8013475d4


YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8013475d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8013475d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8013475d

Branch: refs/heads/trunk
Commit: 8013475d447a8377b5aed858208bf8b91dd32366
Parents: 0b489e5
Author: Jason Lowe 
Authored: Thu Feb 15 17:09:00 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 17:09:00 2018 -0600

--
 .../java/org/apache/hadoop/yarn/util/Apps.java  |  22 ++-
 .../yarn/util/AuxiliaryServiceHelper.java   |   2 +-
 .../server/nodemanager/ContainerExecutor.java   |  62 ++---
 .../nodemanager/LinuxContainerExecutor.java |   8 --
 .../launcher/ContainerLaunch.java   |  88 
 .../runtime/DefaultLinuxContainerRuntime.java   |   6 -
 .../DelegatingLinuxContainerRuntime.java|  11 --
 .../runtime/DockerLinuxContainerRuntime.java|   7 -
 .../runtime/ContainerRuntime.java   |  11 --
 .../launcher/TestContainerLaunch.java   | 133 +--
 10 files changed, 240 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8013475d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
index 685c6d3..1c90d55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.regex.Matcher;
@@ -105,7 +106,26 @@ public class Apps {
   }
 }
   }
-  
+
+  /**
+   *
+   * @param envString String containing env variable definitions
+   * @param classPathSeparator String that separates the definitions
+   * @return ArrayList of environment variable names
+   */
+  public static ArrayList<String> getEnvVarsFromInputString(String envString,
+  String classPathSeparator) {
+ArrayList<String> envList = new ArrayList<>();
+if (envString != null && envString.length() > 0) {
+  Matcher varValMatcher = VARVAL_SPLITTER.matcher(envString);
+  while (varValMatcher.find()) {
+String envVar = varValMatcher.group(1);
+envList.add(envVar);
+  }
+}
+return envList;
+  }
+
   /**
* This older version of this method is kept around for compatibility
* because downstream frameworks like Spark and Tez have been using it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8013475d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
index cb118f5..1374d96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
@@ -45,7 +45,7 @@ public class AuxiliaryServiceHelper {
 Base64.encodeBase64String(byteData));
   }
 
-  private static String getPrefixServiceName(String serviceName) {
+  public static String getPrefixServiceName(String serviceName) {
 return NM_AUX_SERVICE + serviceName;
   }
 }
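
The visibility change above lets other classes build the environment variable
name under which an auxiliary service's metadata is exported to containers.
A hedged sketch, with the service name chosen only for illustration:

    import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper;

    public class AuxServiceEnvNameDemo {
      public static void main(String[] args) {
        // Returns the NM_AUX_SERVICE prefix concatenated with the service name,
        // i.e. the env var a container would read to obtain the service data.
        String envVarName =
            AuxiliaryServiceHelper.getPrefixServiceName("mapreduce_shuffle");
        System.out.println(envVarName);
      }
    }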

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8013475d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 

[1/4] hadoop git commit: YARN-7920. Simplify configuration for PlacementConstraints. Contributed by Wangda Tan.

2018-02-15 Thread kkaranasos
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 4cdc57f6a -> 41708402a
  refs/heads/trunk 47473952e -> 0b489e564


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b489e56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
new file mode 100644
index 000..6af62e7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
@@ -0,0 +1,136 @@
+
+
+Placement Constraints
+=
+
+
+
+
+Overview
+
+
+YARN allows applications to specify placement constraints in the form of data 
locality (preference to specific nodes or racks) or (non-overlapping) node 
labels. This document focuses on more expressive placement constraints in YARN. 
Such constraints can be crucial for the performance and resilience of 
applications, especially those that include long-running containers, such as 
services, machine-learning and streaming workloads.
+
+For example, it may be beneficial to co-locate the allocations of a job on the 
same rack (*affinity* constraints) to reduce network costs, spread allocations 
across machines (*anti-affinity* constraints) to minimize resource 
interference, or allow up to a specific number of allocations in a node group 
(*cardinality* constraints) to strike a balance between the two. Placement 
decisions also affect resilience. For example, allocations placed within the 
same cluster upgrade domain would go offline simultaneously.
+
+The applications can specify constraints without requiring knowledge of the 
underlying topology of the cluster (e.g., one does not need to specify the 
specific node or rack where their containers should be placed with constraints) 
or the other applications deployed. Currently **intra-application** constraints 
are supported, but the design that is followed is generic and support for 
constraints across applications will soon be added. Moreover, all constraints 
at the moment are **hard**, that is, if the constraints for a container cannot 
be satisfied due to the current cluster condition or conflicting constraints, 
the container request will remain pending or will get rejected.
+
+Note that in this document we use the notion of “allocation” to refer to a 
unit of resources (e.g., CPU and memory) that gets allocated in a node. In the 
current implementation of YARN, an allocation corresponds to a single 
container. However, if an application uses an allocation to spawn more than 
one container, an allocation could correspond to multiple containers.
+
+
+Quick Guide
+---
+
+We first describe how to enable scheduling with placement constraints and then 
provide examples of how to experiment with this feature using the distributed 
shell, an application that allows running a given shell command on a set of 
containers.
+
+### Enabling placement constraints
+
+To enable placement constraints, the following property has to be set to 
`placement-processor` or `scheduler` in **conf/yarn-site.xml**:
+
+| Property | Description | Default value |
+|: |:--- |:- |
+| `yarn.resourcemanager.placement-constraints.handler` | Specify which handler 
will be used to process PlacementConstraints. Acceptable values are: 
`placement-processor`, `scheduler`, and `disabled`. | `disabled` |
+
+We now give more details about each of the three placement constraint handlers:
+
+* `placement-processor`: Using this handler, the placement of containers with 
constraints is determined as a pre-processing step before the capacity or the 
fair scheduler is called. Once the placement is decided, the capacity/fair 
scheduler is invoked to perform the actual allocation. The advantage of this 
handler is that it supports all constraint types (affinity, anti-affinity, 
cardinality). Moreover, it considers multiple containers at a time, which 
allows it to satisfy more constraints than a container-at-a-time approach can 
achieve. As it sits outside the main scheduler, it can be used by both the 
capacity and fair schedulers. Note that at the moment it does not account for 
task priorities within an application, given that such priorities might be 
conflicting with the placement constraints.
+* `scheduler`: Using this handler, containers with constraints will be placed 
by the main scheduler (as of now, only the capacity scheduler supports 
SchedulingRequests). It currently supports anti-affinity constraints (no 
affinity or cardinality). The advantage of this handler, when compared to the 
`placement-processor`, is that it follows the same ordering rules for queues 
(sorted by utilization, priority), apps (sorted by FIFO/fairness/priority) and 
tasks within 
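
To make the Quick Guide above concrete, here is a hedged sketch of selecting
the placement-processor handler programmatically, e.g. from a test; the two
constants are the ones introduced by this commit in YarnConfiguration, while
the wrapper class is an assumption for illustration:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class EnablePlacementProcessor {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Equivalent to setting yarn.resourcemanager.placement-constraints.handler
        // to "placement-processor" in conf/yarn-site.xml.
        conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
            YarnConfiguration.PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER);
        System.out.println(conf.get(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER));
      }
    }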

[4/4] hadoop git commit: YARN-7920. Simplify configuration for PlacementConstraints. Contributed by Wangda Tan.

2018-02-15 Thread kkaranasos
YARN-7920. Simplify configuration for PlacementConstraints. Contributed by 
Wangda Tan.

(cherry picked from commit 0b489e564ce5a50324a530e29c18aa8a75276c50)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41708402
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41708402
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41708402

Branch: refs/heads/branch-3.1
Commit: 41708402a1c1033d8829aad23db9cfe90de77acd
Parents: 4cdc57f
Author: Konstantinos Karanasos 
Authored: Thu Feb 15 14:23:27 2018 -0800
Committer: Konstantinos Karanasos 
Committed: Thu Feb 15 14:25:34 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  54 ++-
 .../TestAMRMClientPlacementConstraints.java |   3 +-
 .../src/main/resources/yarn-default.xml |  10 +-
 .../ApplicationMasterService.java   |  46 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  13 -
 .../CapacitySchedulerConfiguration.java |   5 -
 .../processor/AbstractPlacementProcessor.java   |  96 +
 .../processor/DisabledPlacementProcessor.java   |  77 
 .../processor/PlacementConstraintProcessor.java | 340 +
 .../processor/PlacementProcessor.java   | 377 ---
 .../processor/SchedulerPlacementProcessor.java  |  55 +++
 ...apacitySchedulerSchedulingRequestUpdate.java |   4 +
 ...estSchedulingRequestContainerAllocation.java |   8 +-
 ...hedulingRequestContainerAllocationAsync.java |   4 +-
 .../scheduler/capacity/TestUtils.java   |   4 +-
 .../constraint/TestPlacementProcessor.java  |  12 +-
 .../src/site/markdown/PlacementConstraints.md   | 136 +++
 .../site/markdown/PlacementConstraints.md.vm| 149 
 18 files changed, 818 insertions(+), 575 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41708402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 118f9fb..6677478 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -532,11 +532,57 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_SCHEDULER = 
 RM_PREFIX + "scheduler.class";
 
-  /** Enable rich placement constraints. */
-  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
-  RM_PREFIX + "placement-constraints.enabled";
+  /**
+   * Specify which handler will be used to process PlacementConstraints.
+   * For details on PlacementConstraints, please refer to
+   * {@link org.apache.hadoop.yarn.api.resource.PlacementConstraint}
+   */
+  @Private
+  public static final String RM_PLACEMENT_CONSTRAINTS_HANDLER =
+  RM_PREFIX + "placement-constraints.handler";
+
+  /**
+   * This handler rejects all allocate calls made by an application, if they
+   * contain a {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}.
+   */
+  @Private
+  public static final String DISABLED_RM_PLACEMENT_CONSTRAINTS_HANDLER =
+  "disabled";
 
-  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
+  /**
+   * Using this handler, the placement of containers with constraints is
+   * determined as a pre-processing step before the capacity or the fair
+   * scheduler is called. Once the placement is decided, the capacity/fair
+   * scheduler is invoked to perform the actual allocation. The advantage of
+   * this approach is that it supports all constraint types (affinity,
+   * anti-affinity, cardinality). Moreover, it considers multiple containers at
+   * a time, which allows it to satisfy more constraints than a 
container-at-a-time
+   * approach can achieve. As it sits outside the main scheduler, it can be 
used
+   * by both the capacity and fair schedulers. Note that at the moment it does
+   * not account for task priorities within an application, given that such
+   * priorities might be conflicting with the placement constraints.
+   */
+  @Private
+  public static final String PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER =
+  "placement-processor";
+
+  /**
+   * Using this handler, containers with constraints will be placed by the main
+   * scheduler. If the configured RM scheduler
+   * yarn.resourcemanager.scheduler.class
+   * cannot handle placement constraints, 
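
To illustrate how the three handler values differ in effect, a hypothetical
sketch of the dispatch an RM-side component might perform; only the constants
shown in this diff come from the commit, while the "scheduler" literal and the
branch bodies are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public final class HandlerDispatchSketch {
      static String describe(Configuration conf) {
        String handler = conf.get(
            YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
            YarnConfiguration.DISABLED_RM_PLACEMENT_CONSTRAINTS_HANDLER);
        if (YarnConfiguration.PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER
            .equals(handler)) {
          return "pre-process SchedulingRequests before the capacity/fair scheduler";
        } else if ("scheduler".equals(handler)) { // assumed literal for this handler
          return "hand SchedulingRequests directly to the main scheduler";
        } else {
          return "reject allocate calls that carry SchedulingRequests";
        }
      }
    }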

[3/4] hadoop git commit: YARN-7920. Simplify configuration for PlacementConstraints. Contributed by Wangda Tan.

2018-02-15 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41708402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
new file mode 100644
index 000..6af62e7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
@@ -0,0 +1,136 @@
+
+
+Placement Constraints
+=
+
+
+
+
+Overview
+
+
+YARN allows applications to specify placement constraints in the form of data 
locality (preference to specific nodes or racks) or (non-overlapping) node 
labels. This document focuses on more expressive placement constraints in YARN. 
Such constraints can be crucial for the performance and resilience of 
applications, especially those that include long-running containers, such as 
services, machine-learning and streaming workloads.
+
+For example, it may be beneficial to co-locate the allocations of a job on the 
same rack (*affinity* constraints) to reduce network costs, spread allocations 
across machines (*anti-affinity* constraints) to minimize resource 
interference, or allow up to a specific number of allocations in a node group 
(*cardinality* constraints) to strike a balance between the two. Placement 
decisions also affect resilience. For example, allocations placed within the 
same cluster upgrade domain would go offline simultaneously.
+
+The applications can specify constraints without requiring knowledge of the 
underlying topology of the cluster (e.g., one does not need to specify the 
specific node or rack where their containers should be placed with constraints) 
or the other applications deployed. Currently **intra-application** constraints 
are supported, but the design that is followed is generic and support for 
constraints across applications will soon be added. Moreover, all constraints 
at the moment are **hard**, that is, if the constraints for a container cannot 
be satisfied due to the current cluster condition or conflicting constraints, 
the container request will remain pending or will get rejected.
+
+Note that in this document we use the notion of “allocation” to refer to a 
unit of resources (e.g., CPU and memory) that gets allocated in a node. In the 
current implementation of YARN, an allocation corresponds to a single 
container. However, if an application uses an allocation to spawn more than 
one container, an allocation could correspond to multiple containers.
+
+
+Quick Guide
+---
+
+We first describe how to enable scheduling with placement constraints and then 
provide examples of how to experiment with this feature using the distributed 
shell, an application that allows running a given shell command on a set of 
containers.
+
+### Enabling placement constraints
+
+To enable placement constraints, the following property has to be set to 
`placement-processor` or `scheduler` in **conf/yarn-site.xml**:
+
+| Property | Description | Default value |
+|: |:--- |:- |
+| `yarn.resourcemanager.placement-constraints.handler` | Specify which handler 
will be used to process PlacementConstraints. Acceptable values are: 
`placement-processor`, `scheduler`, and `disabled`. | `disabled` |
+
+We now give more details about each of the three placement constraint handlers:
+
+* `placement-processor`: Using this handler, the placement of containers with 
constraints is determined as a pre-processing step before the capacity or the 
fair scheduler is called. Once the placement is decided, the capacity/fair 
scheduler is invoked to perform the actual allocation. The advantage of this 
handler is that it supports all constraint types (affinity, anti-affinity, 
cardinality). Moreover, it considers multiple containers at a time, which 
allows it to satisfy more constraints than a container-at-a-time approach can 
achieve. As it sits outside the main scheduler, it can be used by both the 
capacity and fair schedulers. Note that at the moment it does not account for 
task priorities within an application, given that such priorities might be 
conflicting with the placement constraints.
+* `scheduler`: Using this handler, containers with constraints will be placed 
by the main scheduler (as of now, only the capacity scheduler supports 
SchedulingRequests). It currently supports anti-affinity constraints (no 
affinity or cardinality). The advantage of this handler, when compared to the 
`placement-processor`, is that it follows the same ordering rules for queues 
(sorted by utilization, priority), apps (sorted by FIFO/fairness/priority) and 
tasks within the same app (priority) that are enforced by the existing main 
scheduler.
+* `disabled`: Using this handler, if a 

[2/4] hadoop git commit: YARN-7920. Simplify configuration for PlacementConstraints. Contributed by Wangda Tan.

2018-02-15 Thread kkaranasos
YARN-7920. Simplify configuration for PlacementConstraints. Contributed by 
Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b489e56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b489e56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b489e56

Branch: refs/heads/trunk
Commit: 0b489e564ce5a50324a530e29c18aa8a75276c50
Parents: 4747395
Author: Konstantinos Karanasos 
Authored: Thu Feb 15 14:23:27 2018 -0800
Committer: Konstantinos Karanasos 
Committed: Thu Feb 15 14:23:38 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  54 ++-
 .../TestAMRMClientPlacementConstraints.java |   3 +-
 .../src/main/resources/yarn-default.xml |  10 +-
 .../ApplicationMasterService.java   |  46 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  13 -
 .../CapacitySchedulerConfiguration.java |   5 -
 .../processor/AbstractPlacementProcessor.java   |  96 +
 .../processor/DisabledPlacementProcessor.java   |  77 
 .../processor/PlacementConstraintProcessor.java | 340 +
 .../processor/PlacementProcessor.java   | 377 ---
 .../processor/SchedulerPlacementProcessor.java  |  55 +++
 ...apacitySchedulerSchedulingRequestUpdate.java |   4 +
 ...estSchedulingRequestContainerAllocation.java |   8 +-
 ...hedulingRequestContainerAllocationAsync.java |   4 +-
 .../scheduler/capacity/TestUtils.java   |   4 +-
 .../constraint/TestPlacementProcessor.java  |  12 +-
 .../src/site/markdown/PlacementConstraints.md   | 136 +++
 .../site/markdown/PlacementConstraints.md.vm| 149 
 18 files changed, 818 insertions(+), 575 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b489e56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 118f9fb..6677478 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -532,11 +532,57 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_SCHEDULER = 
 RM_PREFIX + "scheduler.class";
 
-  /** Enable rich placement constraints. */
-  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
-  RM_PREFIX + "placement-constraints.enabled";
+  /**
+   * Specify which handler will be used to process PlacementConstraints.
+   * For details on PlacementConstraints, please refer to
+   * {@link org.apache.hadoop.yarn.api.resource.PlacementConstraint}
+   */
+  @Private
+  public static final String RM_PLACEMENT_CONSTRAINTS_HANDLER =
+  RM_PREFIX + "placement-constraints.handler";
+
+  /**
+   * This handler rejects all allocate calls made by an application, if they
+   * contain a {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}.
+   */
+  @Private
+  public static final String DISABLED_RM_PLACEMENT_CONSTRAINTS_HANDLER =
+  "disabled";
 
-  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
+  /**
+   * Using this handler, the placement of containers with constraints is
+   * determined as a pre-processing step before the capacity or the fair
+   * scheduler is called. Once the placement is decided, the capacity/fair
+   * scheduler is invoked to perform the actual allocation. The advantage of
+   * this approach is that it supports all constraint types (affinity,
+   * anti-affinity, cardinality). Moreover, it considers multiple containers at
+   * a time, which allows it to satisfy more constraints than a 
container-at-a-time
+   * approach can achieve. As it sits outside the main scheduler, it can be 
used
+   * by both the capacity and fair schedulers. Note that at the moment it does
+   * not account for task priorities within an application, given that such
+   * priorities might be conflicting with the placement constraints.
+   */
+  @Private
+  public static final String PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER =
+  "placement-processor";
+
+  /**
+   * Using this handler, containers with constraints will be placed by the main
+   * scheduler. If the configured RM scheduler
+   * yarn.resourcemanager.scheduler.class
+   * cannot handle placement constraints, the corresponding SchedulingRequests
+   * will be rejected. As of now, only 

[1/2] hadoop git commit: HDFS-13149. Ozone: Rename Corona to Freon. Contributed by Anu Engineer.

2018-02-15 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 f3d07efac -> fc84744f7


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc84744f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/tools/TestCorona.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/tools/TestCorona.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/tools/TestCorona.java
deleted file mode 100644
index 3d5a129..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/tools/TestCorona.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.tools;
-
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests Corona, with MiniOzoneCluster.
- */
-public class TestCorona {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * 
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-conf = new OzoneConfiguration();
-conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-cluster = new MiniOzoneClassicCluster.Builder(conf)
-.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-.numDataNodes(5).build();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-if (cluster != null) {
-  cluster.shutdown();
-}
-  }
-
-  @Test
-  public void defaultTest() throws Exception {
-List<String> args = new ArrayList<>();
-args.add("-numOfVolumes");
-args.add("2");
-args.add("-numOfBuckets");
-args.add("5");
-args.add("-numOfKeys");
-args.add("10");
-Corona corona = new Corona(conf);
-int res = ToolRunner.run(conf, corona,
-args.toArray(new String[0]));
-Assert.assertEquals(2, corona.getNumberOfVolumesCreated());
-Assert.assertEquals(10, corona.getNumberOfBucketsCreated());
-Assert.assertEquals(100, corona.getNumberOfKeysAdded());
-Assert.assertEquals(10240 - 36, corona.getKeyValueLength());
-Assert.assertEquals(0, res);
-  }
-
-  @Test
-  public void validateWriteTest() throws Exception {
-PrintStream originalStream = System.out;
-ByteArrayOutputStream outStream = new ByteArrayOutputStream();
-System.setOut(new PrintStream(outStream));
-List<String> args = new ArrayList<>();
-args.add("-validateWrites");
-args.add("-numOfVolumes");
-args.add("2");
-args.add("-numOfBuckets");
-args.add("5");
-args.add("-numOfKeys");
-args.add("10");
-Corona corona = new Corona(conf);
-int res = ToolRunner.run(conf, corona,
-args.toArray(new String[0]));
-Assert.assertEquals(0, res);
-Assert.assertEquals(2, corona.getNumberOfVolumesCreated());
-Assert.assertEquals(10, corona.getNumberOfBucketsCreated());
-Assert.assertEquals(100, corona.getNumberOfKeysAdded());
-Assert.assertTrue(corona.getValidateWrites());
-Assert.assertNotEquals(0, corona.getTotalKeysValidated());
-Assert.assertNotEquals(0, corona.getSuccessfulValidationCount());
-Assert.assertEquals(0, corona.getUnsuccessfulValidationCount());
-System.setOut(originalStream);
-  }
-
-  @Test
-  public void multiThread() throws Exception {
-List<String> args = new ArrayList<>();
-args.add("-numOfVolumes");
-args.add("10");
-args.add("-numOfBuckets");
-args.add("1");
-

[2/2] hadoop git commit: HDFS-13149. Ozone: Rename Corona to Freon. Contributed by Anu Engineer.

2018-02-15 Thread aengineer
HDFS-13149. Ozone: Rename Corona to Freon. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc84744f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc84744f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc84744f

Branch: refs/heads/HDFS-7240
Commit: fc84744f757992b4a1dfdd41bc7a6303f17d0406
Parents: f3d07ef
Author: Anu Engineer 
Authored: Thu Feb 15 13:50:48 2018 -0800
Committer: Anu Engineer 
Committed: Thu Feb 15 13:50:48 2018 -0800

--
 .../hadoop-hdfs/src/main/bin/hdfs   |8 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |2 -
 .../org/apache/hadoop/ozone/tools/Corona.java   | 1146 --
 .../org/apache/hadoop/ozone/tools/Freon.java| 1146 ++
 .../src/site/markdown/OzoneGettingStarted.md.vm |8 +-
 .../src/site/markdown/OzoneOverview.md  |4 +-
 .../apache/hadoop/ozone/tools/TestCorona.java   |  165 ---
 .../apache/hadoop/ozone/tools/TestFreon.java|  165 +++
 .../org/apache/hadoop/test/OzoneTestDriver.java |4 +-
 9 files changed, 1323 insertions(+), 1325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc84744f/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 6d08751..4be674b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -38,7 +38,6 @@ function hadoop_usage
   hadoop_add_subcommand "cblock" admin "cblock CLI"
   hadoop_add_subcommand "cblockserver" daemon "run cblock server"
   hadoop_add_subcommand "classpath" client "prints the class path needed to 
get the hadoop jar and the required libraries"
-  hadoop_add_subcommand "corona" client "run an ozone data generator"
   hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
   hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
   hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug 
commands"
@@ -50,6 +49,7 @@ function hadoop_usage
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
   hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
   hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the 
NameNode"
+  hadoop_add_subcommand "freon" client "runs an ozone data generator"
   hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
   hadoop_add_subcommand "getconf" client "get config values from configuration"
   hadoop_add_subcommand "groups" client "get the groups which users belong to"
@@ -107,9 +107,6 @@ function hdfscmd_case
 classpath)
   hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
 ;;
-corona)
-  HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Corona
-;;
 crypto)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CryptoAdmin
 ;;
@@ -160,6 +157,9 @@ function hdfscmd_case
 fetchdt)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
 ;;
+freon)
+  HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
+;;
 fsck)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
 ;;
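
For orientation, a hedged sketch of driving the renamed tool programmatically,
mirroring the switches the old TestCorona (shown in part [1/2]) passed to
Corona; the Freon constructor is assumed to mirror Corona's, since this commit
is a straight rename:

    import org.apache.hadoop.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.tools.Freon;
    import org.apache.hadoop.util.ToolRunner;

    public class FreonSmokeRun {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        Freon freon = new Freon(conf);
        // 2 volumes, 5 buckets per volume, 10 keys per bucket.
        int res = ToolRunner.run(conf, freon, new String[] {
            "-numOfVolumes", "2", "-numOfBuckets", "5", "-numOfKeys", "10"});
        System.exit(res);
      }
    }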

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc84744f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0f68fa5..a842a98 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -979,8 +979,6 @@ class FsDatasetImpl implements FsDatasetSpi {
 ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block,
 replicaInfo, smallBufferSize, conf);
 
- // Latch here --> wait for the signal.
-
 // Finalize the copied files
 newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
 try (AutoCloseableLock lock = datasetLock.acquire()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc84744f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java

hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 010ed19da -> 7f6ab3f9d


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.

(cherry picked from commit 47473952e56b0380147d42f4110ad03c2276c961)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f6ab3f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f6ab3f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f6ab3f9

Branch: refs/heads/branch-2.7
Commit: 7f6ab3f9d06f829407e159614ec1837b6033a259
Parents: 010ed19
Author: Kihwal Lee 
Authored: Thu Feb 15 15:49:09 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:49:33 2018 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 6 files changed, 86 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f6ab3f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6fab6fc..c93f24c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -36,6 +36,9 @@ Release 2.7.6 - UNRELEASED
 due to the race between replication and delete of same file in a
 large cluster. Contributed by He Xiaoqiao.
 
+HDFS-13112. Token expiration edits may cause log corruption or deadlock.
+(daryn via kihwal)
+
 Release 2.7.5 - 2017-12-14
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f6ab3f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index 8af7eba..193e8ce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -365,34 +364,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail 
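
Abstracting away from the NameNode specifics, here is a hedged, self-contained
sketch of the interrupt-safety pattern this change adopts: acquire the outer
lock interruptibly, then write the edit only while holding the no-interrupts
monitor and only if the thread has not already been interrupted. The class and
method names are illustrative, not the actual HDFS types, and the sketch
assumes the stopping thread interrupts only while holding noInterruptsLock:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class InterruptSafeEditLogger {
      private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
      private final Object noInterruptsLock = new Object();

      void logEdit(Runnable editLogWrite) throws InterruptedException {
        // Do not block an interrupt while waiting for the lock.
        fsLock.readLock().lockInterruptibly();
        try {
          synchronized (noInterruptsLock) {
            if (Thread.currentThread().isInterrupted()) {
              return; // leave the flag set so the caller sees the stop request
            }
            // The stopper interrupts only under noInterruptsLock, so the write
            // below cannot be interrupted mid-logSync.
            editLogWrite.run();
          }
        } finally {
          fsLock.readLock().unlock();
        }
      }
    }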

hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7a4064188 -> 010ed19da


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/010ed19d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/010ed19d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/010ed19d

Branch: refs/heads/branch-2.7
Commit: 010ed19da0a55e37059ca190c361d2c7ff77f403
Parents: 7a40641
Author: Jason Lowe 
Authored: Thu Feb 15 15:44:23 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:44:23 2018 -0600

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/010ed19d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index eea864d..bd8971f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -24,6 +24,9 @@ Release 2.7.6 - UNRELEASED
 MAPREDUCE-7048. Uber AM can crash due to unknown task in statusUpdate.
 (Peter Bacsko via jlowe)
 
+MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is
+flaky. (Peter Bacsko via jlowe)
+
 Release 2.7.5 - 2017-12-14
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/010ed19d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 8013feb..6281e71 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -300,7 +300,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 
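
For readers wondering where the flakiness came from: with a random record
length of 1, the old expression divided by zero. A worked check in plain Java
arithmetic (values chosen for illustration, not taken from the test):

    public class SplitDivisorCheck {
      public static void main(String[] args) {
        long fileSize = 5000;
        int recordLength = 1;
        double oldDivisor = Math.floor(recordLength / 2);              // 1/2 -> 0, floor -> 0.0
        double newDivisor = Math.max(1, Math.floor(recordLength / 2)); // guarded -> 1.0
        System.out.println((int) (fileSize / oldDivisor)); // Infinity as int: 2147483647
        System.out.println((int) (fileSize / newDivisor)); // 5000
      }
    }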


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8118d9927 -> 48d0398a1


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.

(cherry picked from commit 47473952e56b0380147d42f4110ad03c2276c961)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48d0398a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48d0398a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48d0398a

Branch: refs/heads/branch-2.8
Commit: 48d0398a14aff77e7f41ee4311b80f1a007fe46a
Parents: 8118d99
Author: Kihwal Lee 
Authored: Thu Feb 15 15:43:44 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:43:44 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48d0398a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // 

hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 1871e6844 -> 01fc51fca


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.

(cherry picked from commit 47473952e56b0380147d42f4110ad03c2276c961)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01fc51fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01fc51fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01fc51fc

Branch: refs/heads/branch-2.9
Commit: 01fc51fca731c48626eea38584b7d880332762ff
Parents: 1871e68
Author: Kihwal Lee 
Authored: Thu Feb 15 15:39:23 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:39:23 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01fc51fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // 

hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a87bccdc3 -> fe044e69c


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.

(cherry picked from commit 47473952e56b0380147d42f4110ad03c2276c961)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe044e69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe044e69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe044e69

Branch: refs/heads/branch-2
Commit: fe044e69c3a008653415edfe995d0492d1cf4012
Parents: a87bccd
Author: Kihwal Lee 
Authored: Thu Feb 15 15:38:49 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:38:49 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe044e69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave 

hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 22f44d2b7 -> 8118d9927


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8118d992
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8118d992
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8118d992

Branch: refs/heads/branch-2.8
Commit: 8118d9927ce6c2015365469bdae8d104c769ae0b
Parents: 22f44d2
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:35:42 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118d992/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 8013feb..6281e71 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -300,7 +300,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 af3d084d6 -> 50e9419ce


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.

(cherry picked from commit 47473952e56b0380147d42f4110ad03c2276c961)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50e9419c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50e9419c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50e9419c

Branch: refs/heads/branch-3.0.1
Commit: 50e9419ce2d2b7aaefc6ae67810fe813987ca92d
Parents: af3d084
Author: Kihwal Lee 
Authored: Thu Feb 15 15:36:00 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:36:00 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50e9419c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // 
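
The quoted hunk is cut off above, but the structure visible so far mirrors the logUpdateMasterKey change: the edit is written under the namesystem read lock rather than under an interrupt check. The patch also adds an interruptible read-lock acquisition to FSNamesystemLock, surfaced through the RwLock interface as readLockInterruptibly(), which the secret manager calls before taking noInterruptsLock. A minimal sketch of such a method, assuming the namesystem lock wraps a ReentrantReadWriteLock (the class and field names below are illustrative, not the committed code):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class InterruptibleReadLockSketch {
  // Illustrative stand-in for the reader/writer lock wrapped by FSNamesystemLock.
  private final ReentrantReadWriteLock coarseLock = new ReentrantReadWriteLock(true);

  // Acquire the read lock but let the wait be interrupted, so a thread that is
  // being stopped is not forced to block indefinitely in the lock queue.
  public void readLockInterruptibly() throws InterruptedException {
    coarseLock.readLock().lockInterruptibly();
  }

  public void readUnlock() {
    coarseLock.readLock().unlock();
  }
}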

hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 961876e94 -> 1871e6844


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1871e684
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1871e684
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1871e684

Branch: refs/heads/branch-2.9
Commit: 1871e6844e077833def1ce7ee5ff8d0e80ac310b
Parents: 961876e
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:31:50 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1871e684/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 8013feb..6281e71 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -300,7 +300,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b9ce463fb -> 7b83fca45


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.

(cherry picked from commit 47473952e56b0380147d42f4110ad03c2276c961)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b83fca4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b83fca4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b83fca4

Branch: refs/heads/branch-3.0
Commit: 7b83fca4535869fe142f1706110dccce3ff80331
Parents: b9ce463
Author: Kihwal Lee 
Authored: Thu Feb 15 15:35:27 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:35:27 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b83fca4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // 

hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk a53d62ab2 -> 47473952e


HDFS-13112. Token expiration edits may cause log corruption or deadlock. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47473952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47473952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47473952

Branch: refs/heads/trunk
Commit: 47473952e56b0380147d42f4110ad03c2276c961
Parents: a53d62a
Author: Kihwal Lee 
Authored: Thu Feb 15 15:32:42 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 15:32:42 2018 -0600

--
 .../DelegationTokenSecretManager.java   | 53 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java |  5 +-
 .../namenode/TestSecurityTokenEditLog.java  | 24 -
 5 files changed, 83 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before updating master key");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  namesystem.logUpdateMasterKey(key);
+}
+  } finally {
+namesystem.readUnlock();
   }
-  namesystem.logUpdateMasterKey(key);
+} catch (InterruptedException ie) {
+  // AbstractDelegationTokenManager may crash if an exception is thrown.
+  // The interrupt flag will be detected when it attempts to sleep.
+  Thread.currentThread().interrupt();
 }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
   throws IOException {
-synchronized (noInterruptsLock) {
+try {
   // The edit logging code will fail catastrophically if it
   // is interrupted during a logSync, since the interrupt
   // closes the edit log files. Doing this inside the
-  // above lock and then checking interruption status
-  // prevents this bug.
-  if (Thread.interrupted()) {
-throw new InterruptedIOException(
-"Interrupted before expiring delegation token");
+  // fsn lock will prevent being interrupted when stopping
+  // the secret manager.
+  namesystem.readLockInterruptibly();
+  try {
+// this monitor isn't necessary if stopped while holding write lock
+// but for safety, guard against a stop with read lock.
+synchronized (noInterruptsLock) {
+  if (Thread.currentThread().isInterrupted()) {
+return; // leave flag set so secret monitor exits.
+  }
+  
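
Note that the new catch block does not rethrow: it re-asserts the thread's interrupt status so the token remover observes the interrupt at its next sleep and shuts down cleanly, instead of an exception escaping into the token secret manager's base class. A small self-contained illustration of that idiom (the helper below is hypothetical, not part of the patch):

public class InterruptFlagExample {

  // Hypothetical helper showing the idiom used in the patch: swallow the
  // InterruptedException, skip the guarded work, but re-assert the interrupt
  // flag so a calling loop (e.g. a background monitor's sleep) still sees it.
  static void logEditQuietly() {
    try {
      if (Thread.currentThread().isInterrupted()) {
        // Stand-in for lockInterruptibly() noticing a pending interrupt.
        throw new InterruptedException("stop requested");
      }
      System.out.println("edit logged");   // stand-in for the guarded edit-log write
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();  // leave the flag set for the caller
    }
  }

  public static void main(String[] args) {
    Thread.currentThread().interrupt();    // simulate the secret manager being stopped
    logEditQuietly();                      // returns quietly and skips the write
    // The flag survives, so a surrounding sleep or loop would observe the interrupt.
    System.out.println("still interrupted: " + Thread.currentThread().isInterrupted());
  }
}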

hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b725fd692 -> a87bccdc3


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a87bccdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a87bccdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a87bccdc

Branch: refs/heads/branch-2
Commit: a87bccdc3f5419fd68f802798c6c891460cb71fa
Parents: b725fd6
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:27:46 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a87bccdc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 8013feb..6281e71 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -300,7 +300,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 494d07505 -> af3d084d6


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af3d084d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af3d084d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af3d084d

Branch: refs/heads/branch-3.0.1
Commit: af3d084d69360731c244a4e2987bbf45a86e24dc
Parents: 494d075
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:24:42 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af3d084d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 8013feb..6281e71 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -300,7 +300,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 11ff6327a -> b9ce463fb


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9ce463f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9ce463f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9ce463f

Branch: refs/heads/branch-3.0
Commit: b9ce463fbd963645b6bd297b0c2a87f50731fb8e
Parents: 11ff632
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:21:05 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9ce463f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 8013feb..6281e71 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -300,7 +300,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 8b9376a08 -> 50de2676a


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko

(cherry picked from commit a53d62ab26e170a0338f93e228718da52e9196e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50de2676
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50de2676
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50de2676

Branch: refs/heads/branch-3.1
Commit: 50de2676a1ed605e92d1fd9a0d0267076e0f951c
Parents: 8b9376a
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:16:39 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50de2676/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 4864dd0..5134729 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -301,7 +301,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

2018-02-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk da59acd8c -> a53d62ab2


MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. 
Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a53d62ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a53d62ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a53d62ab

Branch: refs/heads/trunk
Commit: a53d62ab26e170a0338f93e228718da52e9196e4
Parents: da59acd
Author: Jason Lowe 
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe 
Committed: Thu Feb 15 15:12:57 2018 -0600

--
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a53d62ab/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 4864dd0..5134729 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -301,7 +301,7 @@ public class TestFixedLengthInputFormat {
   if (i > 0) {
 if (i == (MAX_TESTS-1)) {
   // Test a split size that is less than record len
-  numSplits = (int)(fileSize/Math.floor(recordLength/2));
+  numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
 } else {
   if (MAX_TESTS % i == 0) {
 // Let us create a split size that is forced to be 
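
The flakiness comes from the divisor: when recordLength happens to be 1, the integer division recordLength/2 evaluates to 0, Math.floor turns it into 0.0, fileSize/0.0 is Infinity, and the int cast collapses that to Integer.MAX_VALUE splits. Clamping the divisor with Math.max(1, ...) keeps it at least 1. A short, self-contained sketch of the arithmetic (the values below are illustrative, not taken from the test):

public class SplitDivisorDemo {
  public static void main(String[] args) {
    long fileSize = 5000;   // illustrative file size
    int recordLength = 1;   // the problematic value

    // Old expression: 1/2 == 0 (integer division), floor(0) == 0.0,
    // 5000 / 0.0 == Infinity, and (int) Infinity == Integer.MAX_VALUE.
    int oldNumSplits = (int) (fileSize / Math.floor(recordLength / 2));
    System.out.println("old numSplits = " + oldNumSplits);   // 2147483647

    // Fixed expression: the divisor is clamped to at least 1.
    int newNumSplits = (int) (fileSize / Math.max(1, Math.floor(recordLength / 2)));
    System.out.println("new numSplits = " + newNumSplits);   // 5000
  }
}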


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] Git Push Summary

2018-02-15 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.1-RC0 [created] 88191e920

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 faf87ddf7 -> 7a4064188


xattr api cleanup


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a406418
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a406418
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a406418

Branch: refs/heads/branch-2.7
Commit: 7a4064188033f7f92caac354adf76cc3dd6ac68c
Parents: faf87dd
Author: Kihwal Lee 
Authored: Thu Feb 15 11:42:47 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:42:47 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +--
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 26 +++-
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a406418/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index cba3506..64d13f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -134,8 +134,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a406418/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 75fd8dc..8533945 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -837,28 +837,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1106,8 +1115,13 @@ public class FSXAttrBaseTest {
  * and non-root can't do listXAttrs on /.reserved/raw.
  */
 // non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-assertTrue(xattrNames.size() == 0);
+try {
+  userFs.listXAttrs(path);
+  fail("listXAttr should have thrown AccessControlException");
+} catch (AccessControlException ace) {
+// expected
+}
+
 try {
   // raw path
   

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c23aed1a9 -> 22f44d2b7


xattr api cleanup

(cherry picked from commit da59acd8ca9ab5b49b988ffca64e8cce91c5f741)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22f44d2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22f44d2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22f44d2b

Branch: refs/heads/branch-2.8
Commit: 22f44d2b7fc1868dcd3743674c44a7de39a7d843
Parents: c23aed1
Author: Kihwal Lee 
Authored: Thu Feb 15 11:30:10 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:30:10 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f44d2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index e5243ee..70563eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -136,8 +136,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f44d2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 87a975d..99e0698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -842,28 +842,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1108,20 +1117,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 5b4725fb0 -> 961876e94


xattr api cleanup

(cherry picked from commit da59acd8ca9ab5b49b988ffca64e8cce91c5f741)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/961876e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/961876e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/961876e9

Branch: refs/heads/branch-2.9
Commit: 961876e94fcd9537ab72a75ae27947bc5c028ffc
Parents: 5b4725f
Author: Kihwal Lee 
Authored: Thu Feb 15 11:29:29 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:29:29 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/961876e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ddc088c..1bd6670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -136,8 +136,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/961876e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 87a975d..99e0698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -842,28 +842,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1108,20 +1117,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 07c7df4b2 -> b725fd692


xattr api cleanup

(cherry picked from commit da59acd8ca9ab5b49b988ffca64e8cce91c5f741)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b725fd69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b725fd69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b725fd69

Branch: refs/heads/branch-2
Commit: b725fd6924b08a42c896a0406fc77553c8bfe062
Parents: 07c7df4
Author: Kihwal Lee 
Authored: Thu Feb 15 11:28:45 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:28:45 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b725fd69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ddc088c..1bd6670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -136,8 +136,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b725fd69/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 87a975d..99e0698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -842,28 +842,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1108,20 +1117,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 07263dd46 -> 494d07505


xattr api cleanup

(cherry picked from commit da59acd8ca9ab5b49b988ffca64e8cce91c5f741)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/494d0750
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/494d0750
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/494d0750

Branch: refs/heads/branch-3.0.1
Commit: 494d075055b52b0cc922bc25237e231bb3771c90
Parents: 07263dd
Author: Kihwal Lee 
Authored: Thu Feb 15 11:15:25 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:15:25 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/494d0750/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3223467..be3092c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -137,8 +137,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/494d0750/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 43eeadf..b5f7573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -843,28 +843,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1109,20 +1118,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b302f728f -> 11ff6327a


xattr api cleanup

(cherry picked from commit da59acd8ca9ab5b49b988ffca64e8cce91c5f741)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11ff6327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11ff6327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11ff6327

Branch: refs/heads/branch-3.0
Commit: 11ff6327af3c3e90da5d84d509cca9b67ef9e541
Parents: b302f72
Author: Kihwal Lee 
Authored: Thu Feb 15 11:14:42 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:14:42 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11ff6327/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3223467..be3092c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -137,8 +137,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11ff6327/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 43eeadf..b5f7573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -843,28 +843,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1109,20 +1118,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 050f5287b -> 8b9376a08


xattr api cleanup

(cherry picked from commit da59acd8ca9ab5b49b988ffca64e8cce91c5f741)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b9376a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b9376a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b9376a0

Branch: refs/heads/branch-3.1
Commit: 8b9376a089c4c0a155565406122d56e5e687c00a
Parents: 050f528
Author: Kihwal Lee 
Authored: Thu Feb 15 11:13:56 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:13:56 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b9376a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3223467..be3092c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -137,8 +137,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b9376a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 43eeadf..b5f7573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -843,28 +843,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1109,20 +1118,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-

hadoop git commit: xattr api cleanup

2018-02-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 481d79fed -> da59acd8c


xattr api cleanup


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da59acd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da59acd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da59acd8

Branch: refs/heads/trunk
Commit: da59acd8ca9ab5b49b988ffca64e8cce91c5f741
Parents: 481d79f
Author: Kihwal Lee 
Authored: Thu Feb 15 11:11:55 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 15 11:11:55 2018 -0600

--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 
 2 files changed, 51 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da59acd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3223467..be3092c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -137,8 +137,7 @@ class FSDirXAttrOp {
 final boolean isRawPath = FSDirectory.isReservedRawName(src);
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
 if (fsd.isPermissionEnabled()) {
-  /* To access xattr names, you need EXECUTE in the owning directory. */
-  fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+  fsd.checkPathAccess(pc, iip, FsAction.READ);
 }
 final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
 return XAttrPermissionFilter.
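
The hunk above is the heart of the cleanup: listing xattr names now requires READ access on the path itself (checkPathAccess) rather than just EXECUTE on the parent directory (checkParentAccess). A minimal sketch of what that means for a non-owner caller; the class and setup below are illustrative only, not part of the commit, and assume an HDFS filesystem reachable through the supplied Configuration:

```java
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

public class ListXAttrsAccessSketch {
  /**
   * Illustrative only: after the change above, listing xattr names needs
   * READ access on the path itself; execute/scan access on the parent
   * directory is no longer sufficient.
   */
  static void tryListXAttrs(Configuration conf, Path path,
      UserGroupInformation user) throws Exception {
    user.doAs((PrivilegedExceptionAction<Void>) () -> {
      // Bind a FileSystem instance to the plain (non-superuser) caller.
      FileSystem userFs = FileSystem.get(conf);
      try {
        System.out.println("xattr names: " + userFs.listXAttrs(path));
      } catch (AccessControlException ace) {
        // Raised when the path itself does not grant READ to this user.
        System.out.println("Permission denied: " + ace.getMessage());
      }
      return null;
    });
  }
}
```

The updated FSXAttrBaseTest hunks exercise exactly this: a 0701 parent permission alone now yields an AccessControlException, and a 0704 permission is granted on the child path before the trusted-namespace check.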

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da59acd8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 43eeadf..b5f7573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -843,28 +843,37 @@ public class FSXAttrBaseTest {
 }
 
 /*
- * Check that execute/scan access to the parent dir is sufficient to get
- * xattr names.
+ * Check that execute/scan access to the parent dir is not
+ * sufficient to get xattr names.
  */
 fs.setPermission(path, new FsPermission((short) 0701));
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
+try {
   final FileSystem userFs = dfsCluster.getFileSystem();
   userFs.listXAttrs(childDir);
-  return null;
+  fail("expected AccessControlException");
+} catch (AccessControlException ace) {
+  GenericTestUtils.assertExceptionContains("Permission denied", ace);
 }
+return null;
+  }
   });
 
 /*
  * Test that xattrs in the "trusted" namespace are filtered correctly.
  */
+// Allow the user to read child path.
+fs.setPermission(childDir, new FsPermission((short) 0704));
 fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
 user.doAs(new PrivilegedExceptionAction<Object>() {
 @Override
 public Object run() throws Exception {
   final FileSystem userFs = dfsCluster.getFileSystem();
-  assertTrue(userFs.listXAttrs(childDir).size() == 1);
+  List<String> xattrs = userFs.listXAttrs(childDir);
+  assertTrue(xattrs.size() == 1);
+  assertEquals(name1, xattrs.get(0));
   return null;
 }
   });
@@ -1109,20 +1118,48 @@ public class FSXAttrBaseTest {
 }
 
 /*
-* Test that only user who have parent directory execute access
-*  can see raw.* xattrs returned from listXAttr
+* Test that user who have parent directory execute access
+*  can also not see raw.* xattrs returned from listXAttr
 */
-// non-raw path
-final List<String> xattrNames = userFs.listXAttrs(path);
-assertTrue(xattrNames.size() == 0);
+try {
+  // non-raw path
+ 

[1/2] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-15 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6f944452f -> 07c7df4b2
  refs/heads/branch-3.0 1888b9480 -> b302f728f


HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b302f728
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b302f728
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b302f728

Branch: refs/heads/branch-3.0
Commit: b302f728f8875cf2159f7878757019d9369e9b37
Parents: 1888b94
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 16:29:30 2018 +

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b302f728/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b302f728/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index a496595..9f54a36 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -37,6 +39,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -74,6 +78,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -115,12 +121,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  specific options are patched over the base ones
+   *   

[2/2] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-15 Thread stevel
HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07c7df4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07c7df4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07c7df4b

Branch: refs/heads/branch-2
Commit: 07c7df4b261f23e567d58936b78aee4ab73cb5fc
Parents: 6f94445
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 16:29:44 2018 +

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07c7df4b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07c7df4b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 0e141e3..4488898 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -38,6 +40,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -75,6 +79,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -116,12 +122,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  specific options are patched over the base ones
+   *  before any use is made of the config.
*/
   @Override
-  public void initialize(URI storeUri, Configuration 
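
The updated javadoc above spells out the contract: account-specific options are patched over the base configuration before initialize() makes any use of it. A hedged sketch of how a client could exercise that, assuming the per-account key layout is fs.adl.account.<store>.<option-suffix> overlaid on the matching fs.adl.<option-suffix> key (the store name, endpoint and secrets below are placeholders, not values from the commit):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class AdlPerStoreConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Base identity shared by every adl:// store (other OAuth settings,
    // such as the token provider type and refresh URL, are omitted here).
    conf.set("fs.adl.oauth2.client.id", "BASE_CLIENT_ID");   // placeholder
    conf.set("fs.adl.oauth2.credential", "BASE_SECRET");     // placeholder

    // Assumed per-store override: keys under the new "fs.adl.account." prefix
    // are patched over the matching "fs.adl." keys for that store only.
    conf.set("fs.adl.account.sales.oauth2.client.id", "SALES_CLIENT_ID"); // placeholder
    conf.set("fs.adl.account.sales.oauth2.credential", "SALES_SECRET");   // placeholder

    FileSystem fs =
        FileSystem.get(new URI("adl://sales.azuredatalakestore.net/"), conf);
    System.out.println("connected to " + fs.getUri());
  }
}
```

The approach mirrors S3A's per-bucket configuration: a single core-site.xml can carry one default identity plus overrides for individual stores.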

[1/2] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-15 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 96c047fbb -> 050f5287b
  refs/heads/trunk 9a013b255 -> 481d79fed


HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/050f5287
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/050f5287
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/050f5287

Branch: refs/heads/branch-3.1
Commit: 050f5287b79324b7f6231b879c0bfc608203b980
Parents: 96c047f
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 16:25:55 2018 +

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/050f5287/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/050f5287/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index a496595..9f54a36 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -37,6 +39,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -74,6 +78,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -115,12 +121,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  specific options are patched over the base ones
+   *  before any use is made of the config.
*/
   

[2/2] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

2018-02-15 Thread stevel
HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481d79fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481d79fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481d79fe

Branch: refs/heads/trunk
Commit: 481d79fedc48942654dab08e23e71e80c8eb2aca
Parents: 9a013b2
Author: Steve Loughran 
Authored: Thu Feb 15 16:25:55 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 16:27:31 2018 +

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md  |  37 +
 .../fs/adl/TestValidateConfiguration.java   | 152 +++
 .../hadoop/fs/adl/common/Parallelized.java  |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
   "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+  "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+  "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
   "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index a496595..9f54a36 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -37,6 +39,8 @@ import 
com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -74,6 +78,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -115,12 +121,19 @@ public class AdlFileSystem extends FileSystem {
   /**
* Called after a new FileSystem instance is constructed.
*
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   * for this FileSystem
-   * @param conf the configuration
+   * @param storeUri  a uri whose authority section names the host, port,
+   *  etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *  specific options are patched over the base ones
+   *  before any use is made of the config.
*/
   @Override
-  public void initialize(URI storeUri, Configuration 

[3/4] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

2018-02-15 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
deleted file mode 100644
index 7c8760b..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.file.AccessDeniedException;
-import java.util.concurrent.Callable;
-
-import com.amazonaws.auth.AWSCredentials;
-import 
com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-
-/**
- * Tests use of assumed roles.
- * Only run if an assumed role is provided.
- */
-public class ITestAssumeRole extends AbstractS3ATestBase {
-
-  private static final Logger LOG =
-  LoggerFactory.getLogger(ITestAssumeRole.class);
-
-  private static final String ARN_EXAMPLE
-  = "arn:aws:kms:eu-west-1:000:key/" +
-  "000-16c9-4832-a1a9-c8bbef25ec8b";
-
-  private static final String E_BAD_ROLE
-  = "Not authorized to perform sts:AssumeRole";
-
-  /**
-   * This is AWS policy removes read access.
-   */
-  public static final String RESTRICTED_POLICY = "{\n"
-  + "   \"Version\": \"2012-10-17\",\n"
-  + "   \"Statement\": [{\n"
-  + "  \"Effect\": \"Deny\",\n"
-  + "  \"Action\": \"s3:ListObjects\",\n"
-  + "  \"Resource\": \"*\"\n"
-  + "}\n"
-  + "   ]\n"
-  + "}";
-
-  private void assumeRoleTests() {
-assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
-  }
-
-  private String getAssumedRoleARN() {
-return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
-  }
-
-  /**
-   * Expect a filesystem to fail to instantiate.
-   * @param conf config to use
-   * @param clazz class of exception to expect
-   * @param text text in exception
-   * @param <E> type of exception as inferred from clazz
-   * @throws Exception if the exception was the wrong class
-   */
-  private <E extends Throwable> void expectFileSystemFailure(
-  Configuration conf,
-  Class<E> clazz,
-  String text) throws Exception {
-interceptC(clazz,
-text,
-() -> new Path(getFileSystem().getUri()).getFileSystem(conf));
-  }
-
-  /**
-   * Experimental variant of intercept() which closes any Closeable
-   * returned.
-   */
-  private static <E extends Throwable> E interceptC(
-  Class<E> clazz, String text,
-  Callable<Closeable> eval)
-  throws Exception {
-
-return intercept(clazz, text,
-() -> {
-  try (Closeable c = eval.call()) {
-return c.toString();
-  }
-});
-  }
-
-  @Test
-  public void testCreateCredentialProvider() throws IOException {
-assumeRoleTests();
-describe("Create the credential provider");
-
-String roleARN = getAssumedRoleARN();
-
-Configuration conf = new Configuration(getContract().getConf());
-conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
-conf.set(ASSUMED_ROLE_ARN, roleARN);
-conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
-conf.set(ASSUMED_ROLE_SESSION_DURATION, "45m");
-conf.set(ASSUMED_ROLE_POLICY, RESTRICTED_POLICY);
-try (AssumedRoleCredentialProvider provider
- = new AssumedRoleCredentialProvider(conf)) {
-  LOG.info("Provider is {}", provider);
-  AWSCredentials credentials = provider.getCredentials();
-  assertNotNull("Null credentials from " + provider, credentials);
-}
-  }
-
-  @Test
-  public void testAssumeRoleCreateFS() throws 

[4/4] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

2018-02-15 Thread stevel
HADOOP-15176. Enhance IAM Assumed Role support in S3A client.
Contributed by Steve Loughran

(cherry picked from commit 96c047fbb98c2378eed9693a724d4cbbd03c00fd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a013b25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a013b25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a013b25

Branch: refs/heads/trunk
Commit: 9a013b255f301c557c3868dc1ad657202e9e7a67
Parents: b27ab7d
Author: Steve Loughran 
Authored: Thu Feb 15 15:56:10 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 15:57:10 2018 +

--
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../src/main/resources/core-default.xml |  13 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../fs/s3a/AssumedRoleCredentialProvider.java   | 197 -
 .../org/apache/hadoop/fs/s3a/Constants.java |   2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  17 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  53 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java | 205 +
 .../apache/hadoop/fs/s3a/auth/RoleModel.java| 314 
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 228 ++
 .../apache/hadoop/fs/s3a/auth/package-info.java |  27 +
 .../hadoop/fs/s3a/commit/CommitOperations.java  |   2 +-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 274 ++-
 .../site/markdown/tools/hadoop-aws/testing.md   |  15 +-
 .../s3a/ITestS3AContractDistCpAssumedRole.java  |  52 --
 .../apache/hadoop/fs/s3a/ITestAssumeRole.java   | 324 
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  46 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  40 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 789 +++
 .../auth/ITestAssumedRoleCommitOperations.java  | 130 +++
 .../hadoop/fs/s3a/auth/RoleTestUtils.java   | 171 
 .../fs/s3a/commit/AbstractCommitITest.java  |  12 +-
 .../fs/s3a/commit/ITestCommitOperations.java|   4 +-
 24 files changed, 2345 insertions(+), 654 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
index 15f4fef..86c4df6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
@@ -90,6 +90,14 @@ public class JsonSerialization {
   }
 
   /**
+   * Get the mapper of this class.
+   * @return the mapper
+   */
+  public ObjectMapper getMapper() {
+return mapper;
+  }
+
+  /**
* Convert from JSON.
*
* @param json input

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ede1f1c..ece54c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -977,20 +977,21 @@
 
 
 
-  fs.s3a.assumed.role.session.duration
-  30m
+  fs.s3a.assumed.role.policy
+  
   
-Duration of assumed roles before a refresh is attempted.
+JSON policy to apply to the role.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   
 
 
 
-  fs.s3a.assumed.role.policy
-  
+  fs.s3a.assumed.role.session.duration
+  30m
   
-JSON policy containing more restrictions to apply to the role.
+Duration of assumed roles before a refresh is attempted.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+Range: 15m to 1h
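
The two properties above are the tuning knobs for the assumed-role support added by this change. A hedged sketch of switching an S3A client over to it (the bucket and role ARN are placeholders; the provider class name follows the new org.apache.hadoop.fs.s3a.auth package in the file listing above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AssumedRoleConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Use the assumed-role provider instead of the default credential chain.
    conf.set("fs.s3a.aws.credentials.provider",
        "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider");
    conf.set("fs.s3a.assumed.role.arn",
        "arn:aws:iam::000000000000:role/example-role");   // placeholder ARN
    // Session length: the description above gives a 15m to 1h range.
    conf.set("fs.s3a.assumed.role.session.duration", "30m");
    // fs.s3a.assumed.role.policy may carry an optional JSON policy that
    // further restricts the role; it is left unset in this sketch.

    FileSystem fs = new Path("s3a://example-bucket/").getFileSystem(conf);
    System.out.println("S3A client bound to " + fs.getUri());
  }
}
```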
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 22208f7..cbb5288 100644
--- 

[1/4] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

2018-02-15 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c761e658f -> 96c047fbb
  refs/heads/trunk b27ab7dd8 -> 9a013b255


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c047fb/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
deleted file mode 100644
index 7c8760b..000
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.file.AccessDeniedException;
-import java.util.concurrent.Callable;
-
-import com.amazonaws.auth.AWSCredentials;
-import 
com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-
-/**
- * Tests use of assumed roles.
- * Only run if an assumed role is provided.
- */
-public class ITestAssumeRole extends AbstractS3ATestBase {
-
-  private static final Logger LOG =
-  LoggerFactory.getLogger(ITestAssumeRole.class);
-
-  private static final String ARN_EXAMPLE
-  = "arn:aws:kms:eu-west-1:000:key/" +
-  "000-16c9-4832-a1a9-c8bbef25ec8b";
-
-  private static final String E_BAD_ROLE
-  = "Not authorized to perform sts:AssumeRole";
-
-  /**
-   * This is AWS policy removes read access.
-   */
-  public static final String RESTRICTED_POLICY = "{\n"
-  + "   \"Version\": \"2012-10-17\",\n"
-  + "   \"Statement\": [{\n"
-  + "  \"Effect\": \"Deny\",\n"
-  + "  \"Action\": \"s3:ListObjects\",\n"
-  + "  \"Resource\": \"*\"\n"
-  + "}\n"
-  + "   ]\n"
-  + "}";
-
-  private void assumeRoleTests() {
-assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
-  }
-
-  private String getAssumedRoleARN() {
-return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
-  }
-
-  /**
-   * Expect a filesystem to fail to instantiate.
-   * @param conf config to use
-   * @param clazz class of exception to expect
-   * @param text text in exception
-   * @param <E> type of exception as inferred from clazz
-   * @throws Exception if the exception was the wrong class
-   */
-  private <E extends Throwable> void expectFileSystemFailure(
-  Configuration conf,
-  Class<E> clazz,
-  String text) throws Exception {
-interceptC(clazz,
-text,
-() -> new Path(getFileSystem().getUri()).getFileSystem(conf));
-  }
-
-  /**
-   * Experimental variant of intercept() which closes any Closeable
-   * returned.
-   */
-  private static <E extends Throwable> E interceptC(
-  Class<E> clazz, String text,
-  Callable<Closeable> eval)
-  throws Exception {
-
-return intercept(clazz, text,
-() -> {
-  try (Closeable c = eval.call()) {
-return c.toString();
-  }
-});
-  }
-
-  @Test
-  public void testCreateCredentialProvider() throws IOException {
-assumeRoleTests();
-describe("Create the credential provider");
-
-String roleARN = getAssumedRoleARN();
-
-Configuration conf = new Configuration(getContract().getConf());
-conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
-conf.set(ASSUMED_ROLE_ARN, roleARN);
-conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
-conf.set(ASSUMED_ROLE_SESSION_DURATION, "45m");
-conf.set(ASSUMED_ROLE_POLICY, RESTRICTED_POLICY);
-try (AssumedRoleCredentialProvider provider
- = new AssumedRoleCredentialProvider(conf)) {
-  LOG.info("Provider is {}", provider);
-  AWSCredentials credentials = provider.getCredentials();
-  

[2/4] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

2018-02-15 Thread stevel
HADOOP-15176. Enhance IAM Assumed Role support in S3A client.
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c047fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c047fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c047fb

Branch: refs/heads/branch-3.1
Commit: 96c047fbb98c2378eed9693a724d4cbbd03c00fd
Parents: c761e65
Author: Steve Loughran 
Authored: Thu Feb 15 15:56:10 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 15:56:10 2018 +

--
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../src/main/resources/core-default.xml |  13 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../fs/s3a/AssumedRoleCredentialProvider.java   | 197 -
 .../org/apache/hadoop/fs/s3a/Constants.java |   2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  17 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  53 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java | 205 +
 .../apache/hadoop/fs/s3a/auth/RoleModel.java| 314 
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 228 ++
 .../apache/hadoop/fs/s3a/auth/package-info.java |  27 +
 .../hadoop/fs/s3a/commit/CommitOperations.java  |   2 +-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 274 ++-
 .../site/markdown/tools/hadoop-aws/testing.md   |  15 +-
 .../s3a/ITestS3AContractDistCpAssumedRole.java  |  52 --
 .../apache/hadoop/fs/s3a/ITestAssumeRole.java   | 324 
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  46 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  40 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 789 +++
 .../auth/ITestAssumedRoleCommitOperations.java  | 130 +++
 .../hadoop/fs/s3a/auth/RoleTestUtils.java   | 171 
 .../fs/s3a/commit/AbstractCommitITest.java  |  12 +-
 .../fs/s3a/commit/ITestCommitOperations.java|   4 +-
 24 files changed, 2345 insertions(+), 654 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c047fb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
index 15f4fef..86c4df6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
@@ -90,6 +90,14 @@ public class JsonSerialization {
   }
 
   /**
+   * Get the mapper of this class.
+   * @return the mapper
+   */
+  public ObjectMapper getMapper() {
+return mapper;
+  }
+
+  /**
* Convert from JSON.
*
* @param json input

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c047fb/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ede1f1c..ece54c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -977,20 +977,21 @@
 
 
 
-  fs.s3a.assumed.role.session.duration
-  30m
+  fs.s3a.assumed.role.policy
+  
   
-Duration of assumed roles before a refresh is attempted.
+JSON policy to apply to the role.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   
 
 
 
-  fs.s3a.assumed.role.policy
-  
+  fs.s3a.assumed.role.session.duration
+  30m
   
-JSON policy containing more restrictions to apply to the role.
+Duration of assumed roles before a refresh is attempted.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+Range: 15m to 1h
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c047fb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 22208f7..cbb5288 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ 

[2/3] hadoop git commit: HADOOP-15076. Enhance S3A troubleshooting documents and add a performance document. Contributed by Steve Loughran.

2018-02-15 Thread stevel
HADOOP-15076. Enhance S3A troubleshooting documents and add a performance 
document.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c761e658
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c761e658
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c761e658

Branch: refs/heads/branch-3.1
Commit: c761e658f6594c4e519ed39ef36669de2c5cee15
Parents: 58a2120
Author: Steve Loughran 
Authored: Thu Feb 15 14:56:32 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:56:32 2018 +

--
 .../markdown/tools/hadoop-aws/encryption.md |  21 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  77 +-
 .../markdown/tools/hadoop-aws/performance.md| 518 +
 .../tools/hadoop-aws/troubleshooting_s3a.md | 753 ---
 4 files changed, 1029 insertions(+), 340 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c761e658/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
index 719c5e5..54398d7 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
@@ -37,6 +37,8 @@ and keys with which the file was encrypted.
 * You can use AWS bucket policies to mandate encryption rules for a bucket.
 * You can use S3A per-bucket configuration to ensure that S3A clients use 
encryption
 policies consistent with the mandated rules.
+* You can use S3 Default Encryption to encrypt data without needing to
+set anything in the client.
 * Changing the encryption options on the client does not change how existing
 files were encrypted, except when the files are renamed.
 * For all mechanisms other than SSE-C, clients do not need any configuration
@@ -58,9 +60,10 @@ The server-side "SSE" encryption is performed with symmetric 
AES256 encryption;
 S3 offers different mechanisms for actually defining the key to use.
 
 
-There are thrre key management mechanisms, which in order of simplicity of use,
+There are four key management mechanisms, which in order of simplicity of use,
 are:
 
+* S3 Default Encryption
 * SSE-S3: an AES256 key is generated in S3, and saved alongside the data.
 * SSE-KMS: an AES256 key is generated in S3, and encrypted with a secret key 
provided
 by Amazon's Key Management Service, a key referenced by name in the uploading 
client.
@@ -68,6 +71,19 @@ by Amazon's Key Management Service, a key referenced by name 
in the uploading cl
 to encrypt and decrypt the data.
 
 
+##  S3 Default Encryption
+
+This feature allows the administrators of the AWS account to set the "default"
+encryption policy on a bucket -the encryption to use if the client does
+not explicitly declare an encryption algorithm.
+
+[S3 Default Encryption for S3 
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+
+This supports SSE-S3 and SSE-KMS.
+
+There is no need to set anything up in the client: do it in the AWS console.
+
+
 ##  SSE-S3 Amazon S3-Managed Encryption Keys
 
 In SSE-S3, all keys and secrets are managed inside S3. This is the simplest 
encryption mechanism.
@@ -413,7 +429,6 @@ How can you do that from Hadoop? With `rename()`.
 
 The S3A client mimics a real filesystem's' rename operation by copying all the
 source files to the destination paths, then deleting the old ones.
-If you do a rename()
 
 Note: this does not work for SSE-C, because you cannot set a different key
 for reading as for writing, and you must supply that key for reading. There
@@ -421,7 +436,7 @@ you need to copy one bucket to a different bucket, one with 
a different key.
 Use `distCp`for this, with per-bucket encryption policies.
 
 
-##  Troubleshooting Encryption
+##  Troubleshooting Encryption
 
 The [troubleshooting](./troubleshooting_s3a.html) document covers
 stack traces which may surface when working with encrypted data.
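
The encryption.md changes above enumerate the key-management mechanisms (S3 Default Encryption, SSE-S3, SSE-KMS, SSE-C). As a hedged illustration of the client-side settings for the first three, the sketch below uses S3A's documented option names; the bucket name and KMS key id are placeholders, and the per-bucket variants simply reuse the fs.s3a.bucket.<name>. override pattern:

```java
import org.apache.hadoop.conf.Configuration;

public class S3AEncryptionConfigSketch {
  // Illustrative only; values are placeholders.
  public static Configuration sseExamples() {
    Configuration conf = new Configuration();

    // SSE-S3: S3-managed AES256 keys, no key material held by the client.
    conf.set("fs.s3a.server-side-encryption-algorithm", "AES256");

    // SSE-KMS for one specific bucket, naming the KMS key to use.
    conf.set("fs.s3a.bucket.example-bucket.server-side-encryption-algorithm",
        "SSE-KMS");
    conf.set("fs.s3a.bucket.example-bucket.server-side-encryption.key",
        "arn:aws:kms:eu-west-1:000000000000:key/EXAMPLE-KEY-ID");

    // S3 Default Encryption needs nothing here: it is configured on the
    // bucket itself, as the section above notes.
    return conf;
  }
}
```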

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c761e658/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 0e03100..edf392d 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -25,6 +25,7 @@ Please use `s3a:` as the connector to data hosted in S3 with 

[3/3] hadoop git commit: HADOOP-15076. Enhance S3A troubleshooting documents and add a performance document. Contributed by Steve Loughran.

2018-02-15 Thread stevel
HADOOP-15076. Enhance S3A troubleshooting documents and add a performance 
document.
Contributed by Steve Loughran.

(cherry picked from commit c761e658f6594c4e519ed39ef36669de2c5cee15)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b27ab7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b27ab7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b27ab7dd

Branch: refs/heads/trunk
Commit: b27ab7dd81359df0a7594ebb98e656a41cd19250
Parents: c9a373f
Author: Steve Loughran 
Authored: Thu Feb 15 14:57:56 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:57:56 2018 +

--
 .../markdown/tools/hadoop-aws/encryption.md |  21 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  77 +-
 .../markdown/tools/hadoop-aws/performance.md| 518 +
 .../tools/hadoop-aws/troubleshooting_s3a.md | 753 ---
 4 files changed, 1029 insertions(+), 340 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
index 719c5e5..54398d7 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
@@ -37,6 +37,8 @@ and keys with which the file was encrypted.
 * You can use AWS bucket policies to mandate encryption rules for a bucket.
 * You can use S3A per-bucket configuration to ensure that S3A clients use 
encryption
 policies consistent with the mandated rules.
+* You can use S3 Default Encryption to encrypt data without needing to
+set anything in the client.
 * Changing the encryption options on the client does not change how existing
 files were encrypted, except when the files are renamed.
 * For all mechanisms other than SSE-C, clients do not need any configuration
@@ -58,9 +60,10 @@ The server-side "SSE" encryption is performed with symmetric 
AES256 encryption;
 S3 offers different mechanisms for actually defining the key to use.
 
 
-There are thrre key management mechanisms, which in order of simplicity of use,
+There are four key management mechanisms, which in order of simplicity of use,
 are:
 
+* S3 Default Encryption
 * SSE-S3: an AES256 key is generated in S3, and saved alongside the data.
 * SSE-KMS: an AES256 key is generated in S3, and encrypted with a secret key 
provided
 by Amazon's Key Management Service, a key referenced by name in the uploading 
client.
@@ -68,6 +71,19 @@ by Amazon's Key Management Service, a key referenced by name 
in the uploading cl
 to encrypt and decrypt the data.
 
 
+##  S3 Default Encryption
+
+This feature allows the administrators of the AWS account to set the "default"
+encryption policy on a bucket -the encryption to use if the client does
+not explicitly declare an encryption algorithm.
+
+[S3 Default Encryption for S3 
Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+
+This supports SSE-S3 and SSE-KMS.
+
+There is no need to set anything up in the client: do it in the AWS console.
+
+
 ##  SSE-S3 Amazon S3-Managed Encryption Keys
 
 In SSE-S3, all keys and secrets are managed inside S3. This is the simplest 
encryption mechanism.
@@ -413,7 +429,6 @@ How can you do that from Hadoop? With `rename()`.
 
 The S3A client mimics a real filesystem's' rename operation by copying all the
 source files to the destination paths, then deleting the old ones.
-If you do a rename()
 
 Note: this does not work for SSE-C, because you cannot set a different key
 for reading as for writing, and you must supply that key for reading. There
@@ -421,7 +436,7 @@ you need to copy one bucket to a different bucket, one with 
a different key.
 Use `distCp`for this, with per-bucket encryption policies.
 
 
-##  Troubleshooting Encryption
+##  Troubleshooting Encryption
 
 The [troubleshooting](./troubleshooting_s3a.html) document covers
 stack traces which may surface when working with encrypted data.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 0e03100..edf392d 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -25,6 +25,7 @@ 

[1/3] [hadoop] Git Push Summary

2018-02-15 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 58a2120e8 -> c761e658f
  refs/heads/trunk c9a373fb1 -> b27ab7dd8

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/4] hadoop git commit: HADOOP-15090. Add ADL troubleshooting doc. Contributed by Steve Loughran.

2018-02-15 Thread stevel
HADOOP-15090. Add ADL troubleshooting doc.
Contributed by Steve Loughran.

(cherry picked from commit 58a2120e8a31307f19551f87be4e81d4fb626de1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9a373fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9a373fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9a373fb

Branch: refs/heads/trunk
Commit: c9a373fb14bbf826324c2547397f82b73bd466f4
Parents: 6ea7d78
Author: Steve Loughran 
Authored: Thu Feb 15 14:26:00 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:26:00 2018 +

--
 .../src/site/markdown/index.md  |   4 +
 .../src/site/markdown/troubleshooting_adl.md| 146 +++
 2 files changed, 150 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a373fb/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ca79321..d2b6edf 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -22,6 +22,10 @@ The `hadoop-azure-datalake` module provides support for 
integration with the
 [Azure Data Lake 
Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
+### Related Documents
+
+* [Troubleshooting](troubleshooting_adl.html).
+
 ## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a373fb/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
new file mode 100644
index 000..80b2a6f
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
@@ -0,0 +1,146 @@
+
+
+# Troubleshooting ADL
+
+
+
+
+## Error messages
+
+
+### Error fetching access token:
+
+You aren't authenticated.
+
+### Error fetching access token:  JsonParseException
+
+This means a problem talking to the oauth endpoint.
+
+
+```
+Operation null failed with exception 
com.fasterxml.jackson.core.JsonParseException : Unexpected character ('<' (code 
60)): expected a valid value (number, String, array, object, 'true', 'false' or 
'null')
+  at [Source: 
sun.net.www.protocol.http.HttpURLConnection$HttpInputStream@211d30ed; line: 3, 
column: 2]
+  Last encountered exception thrown after 5 tries. 
[com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException]
+  [ServerRequestId:null]
+  at 
com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1147)
+  at 
com.microsoft.azure.datalake.store.ADLStoreClient.getDirectoryEntry(ADLStoreClient.java:725)
+  at 
org.apache.hadoop.fs.adl.AdlFileSystem.getFileStatus(AdlFileSystem.java:476)
+  at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1713)
+  at 
org.apache.hadoop.fs.contract.ContractTestUtils.rm(ContractTestUtils.java:397)
+  at 
org.apache.hadoop.fs.contract.ContractTestUtils.cleanup(ContractTestUtils.java:374)
+  at 
org.apache.hadoop.fs.contract.AbstractFSContractTestBase.deleteTestDirInTeardown(AbstractFSContractTestBase.java:213)
+  at 
org.apache.hadoop.fs.contract.AbstractFSContractTestBase.teardown(AbstractFSContractTestBase.java:204)
+  at 
org.apache.hadoop.fs.contract.AbstractContractOpenTest.teardown(AbstractContractOpenTest.java:64)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at 
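
The stack trace above is truncated by the archive, but both "Error fetching access token" entries in the new troubleshooting page trace back to the OAuth settings the ADL connector needs before it can reach the token endpoint. A hedged reminder of that minimal set for the client-credential flow (every value below is a placeholder):

```java
import org.apache.hadoop.conf.Configuration;

public class AdlOAuthConfigSketch {
  // Illustrative only: if any of these are missing or point at the wrong
  // endpoint, token fetching fails in the ways described above.
  public static Configuration clientCredentialAuth() {
    Configuration conf = new Configuration();
    conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
    conf.set("fs.adl.oauth2.refresh.url",
        "https://login.microsoftonline.com/TENANT_ID/oauth2/token"); // placeholder tenant
    conf.set("fs.adl.oauth2.client.id", "CLIENT_ID");       // placeholder
    conf.set("fs.adl.oauth2.credential", "CLIENT_SECRET");  // placeholder
    return conf;
  }
}
```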

[1/4] hadoop git commit: HADOOP-15090. Add ADL troubleshooting doc. Contributed by Steve Loughran.

2018-02-15 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 56b950d77 -> 6f944452f
  refs/heads/branch-3.0 4b4157441 -> 1888b9480
  refs/heads/branch-3.1 bfdf99eb3 -> 58a2120e8
  refs/heads/trunk 6ea7d78cc -> c9a373fb1


HADOOP-15090. Add ADL troubleshooting doc.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58a2120e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58a2120e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58a2120e

Branch: refs/heads/branch-3.1
Commit: 58a2120e8a31307f19551f87be4e81d4fb626de1
Parents: bfdf99e
Author: Steve Loughran 
Authored: Thu Feb 15 14:24:36 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:24:36 2018 +

--
 .../src/site/markdown/index.md  |   4 +
 .../src/site/markdown/troubleshooting_adl.md| 146 +++
 2 files changed, 150 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58a2120e/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ca79321..d2b6edf 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -22,6 +22,10 @@ The `hadoop-azure-datalake` module provides support for 
integration with the
 [Azure Data Lake 
Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
+### Related Documents
+
+* [Troubleshooting](troubleshooting_adl.html).
+
 ## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58a2120e/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
new file mode 100644
index 000..80b2a6f
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
@@ -0,0 +1,146 @@
+
+
+# Troubleshooting ADL
+
+
+
+
+## Error messages
+
+
+### Error fetching access token:
+
+You aren't authenticated.
+
+### Error fetching access token:  JsonParseException
+
+This means there was a problem talking to the OAuth endpoint.
+
+
+```
+Operation null failed with exception com.fasterxml.jackson.core.JsonParseException : Unexpected character ('<' (code 60)): expected a valid value (number, String, array, object, 'true', 'false' or 'null')
+  at [Source: sun.net.www.protocol.http.HttpURLConnection$HttpInputStream@211d30ed; line: 3, column: 2]
+  Last encountered exception thrown after 5 tries. [com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException]
+  [ServerRequestId:null]
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1147)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getDirectoryEntry(ADLStoreClient.java:725)
+  at org.apache.hadoop.fs.adl.AdlFileSystem.getFileStatus(AdlFileSystem.java:476)
+  at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1713)
+  at org.apache.hadoop.fs.contract.ContractTestUtils.rm(ContractTestUtils.java:397)
+  at org.apache.hadoop.fs.contract.ContractTestUtils.cleanup(ContractTestUtils.java:374)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.deleteTestDirInTeardown(AbstractFSContractTestBase.java:213)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.teardown(AbstractFSContractTestBase.java:204)
+  at org.apache.hadoop.fs.contract.AbstractContractOpenTest.teardown(AbstractContractOpenTest.java:64)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
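
For the `Error fetching access token` cases in the troubleshooting document added above, the first thing to check is that the OAuth2 client-credential settings are present and correct before the filesystem is created. The following is a minimal Java sketch assuming a service-principal (ClientCredential) setup; the class name and the TENANT_ID, CLIENT_ID, CLIENT_SECRET and ACCOUNT values are placeholders for illustration only, and in practice the `fs.adl.oauth2.*` keys normally live in `core-site.xml` or a credential provider rather than in code.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AdlAuthSmokeTest {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Standard ADL connector OAuth2 client-credential properties.
    // TENANT_ID / CLIENT_ID / CLIENT_SECRET / ACCOUNT are placeholders,
    // not real values.
    conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
    conf.set("fs.adl.oauth2.refresh.url",
        "https://login.microsoftonline.com/TENANT_ID/oauth2/token");
    conf.set("fs.adl.oauth2.client.id", "CLIENT_ID");
    conf.set("fs.adl.oauth2.credential", "CLIENT_SECRET");

    // getFileStatus() is the call that fails in the stack traces above
    // when these settings are missing or point at the wrong endpoint.
    FileSystem fs = FileSystem.get(
        URI.create("adl://ACCOUNT.azuredatalakestore.net/"), conf);
    System.out.println(fs.getFileStatus(new Path("/")));
  }
}
```

If the settings are absent or wrong, this same `getFileStatus()` call fails with the `Error fetching access token` message shown in the traces.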

[3/4] hadoop git commit: HADOOP-15090. Add ADL troubleshooting doc. Contributed by Steve Loughran.

2018-02-15 Thread stevel
HADOOP-15090. Add ADL troubleshooting doc.
Contributed by Steve Loughran.

(cherry picked from commit 58a2120e8a31307f19551f87be4e81d4fb626de1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1888b948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1888b948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1888b948

Branch: refs/heads/branch-3.0
Commit: 1888b94806d267ceb15085d727a876f5460aba97
Parents: 4b41574
Author: Steve Loughran 
Authored: Thu Feb 15 14:24:36 2018 +
Committer: Steve Loughran 
Committed: Thu Feb 15 14:26:18 2018 +

--
 .../src/site/markdown/index.md  |   4 +
 .../src/site/markdown/troubleshooting_adl.md| 146 +++
 2 files changed, 150 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1888b948/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ca79321..d2b6edf 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -22,6 +22,10 @@ The `hadoop-azure-datalake` module provides support for integration with the
 [Azure Data Lake Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
+### Related Documents
+
+* [Troubleshooting](troubleshooting_adl.html).
+
 ## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1888b948/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
new file mode 100644
index 000..80b2a6f
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
@@ -0,0 +1,146 @@
+
+
+# Troubleshooting ADL
+
+
+
+
+## Error messages
+
+
+### Error fetching access token:
+
+You aren't authenticated.
+
+### Error fetching access token:  JsonParseException
+
+This means there was a problem talking to the OAuth endpoint.
+
+
+```
+Operation null failed with exception com.fasterxml.jackson.core.JsonParseException : Unexpected character ('<' (code 60)): expected a valid value (number, String, array, object, 'true', 'false' or 'null')
+  at [Source: sun.net.www.protocol.http.HttpURLConnection$HttpInputStream@211d30ed; line: 3, column: 2]
+  Last encountered exception thrown after 5 tries. [com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException]
+  [ServerRequestId:null]
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1147)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getDirectoryEntry(ADLStoreClient.java:725)
+  at org.apache.hadoop.fs.adl.AdlFileSystem.getFileStatus(AdlFileSystem.java:476)
+  at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1713)
+  at org.apache.hadoop.fs.contract.ContractTestUtils.rm(ContractTestUtils.java:397)
+  at org.apache.hadoop.fs.contract.ContractTestUtils.cleanup(ContractTestUtils.java:374)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.deleteTestDirInTeardown(AbstractFSContractTestBase.java:213)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.teardown(AbstractFSContractTestBase.java:204)
+  at org.apache.hadoop.fs.contract.AbstractContractOpenTest.teardown(AbstractContractOpenTest.java:64)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at 
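
On the `JsonParseException` itself: the `Unexpected character ('<' (code 60))` message is what Jackson raises when it is handed text that starts with `<`, which usually means the token endpoint answered with an HTML error page instead of a JSON token response, for example because `fs.adl.oauth2.refresh.url` points at the wrong URL or a proxy intercepted the request. A minimal, self-contained sketch (class name hypothetical, assuming `jackson-databind` is on the classpath) reproduces the same parser behaviour:

```java
import com.fasterxml.jackson.databind.ObjectMapper;

public class TokenResponseParseDemo {
  public static void main(String[] args) {
    // Stand-in for what a misconfigured OAuth endpoint can return:
    // an HTML error page rather than a JSON access-token response.
    String htmlErrorPage = "<html><body>404 Not Found</body></html>";
    try {
      new ObjectMapper().readTree(htmlErrorPage);
    } catch (Exception e) {
      // Prints a com.fasterxml.jackson.core.JsonParseException whose message
      // starts with "Unexpected character ('<' (code 60))", matching the
      // traces in the troubleshooting document.
      System.out.println(e);
    }
  }
}
```

Fetching the refresh URL directly (for example in a browser) and checking whether the response is actually JSON is therefore a quick way to confirm this diagnosis.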

  1   2   >