hadoop git commit: YARN-6264. AM not launched when a single vcore is available on the cluster. (Yufei Gu via kasha)

2017-03-09 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f298b992f -> 4218671f3


YARN-6264. AM not launched when a single vcore is available on the cluster. 
(Yufei Gu via kasha)

(cherry picked from commit a96afae125ba02fb4480542d3fb0891623ee4c37)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4218671f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4218671f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4218671f

Branch: refs/heads/branch-2
Commit: 4218671f3c46530b9b16bcf1fee5aa823a615d40
Parents: f298b99
Author: Karthik Kambatla 
Authored: Thu Mar 9 23:11:54 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Mar 9 23:16:19 2017 -0800

--
 .../hadoop/yarn/util/resource/Resources.java|  7 +
 .../yarn/util/resource/TestResources.java   | 24 -
 .../scheduler/fair/FSLeafQueue.java |  3 ++-
 .../scheduler/fair/TestFairScheduler.java   | 28 ++--
 4 files changed, 46 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4218671f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index b2f17c4..d22ffd6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -242,6 +242,13 @@ public class Resources {
 out.setVirtualCores((int)(lhs.getVirtualCores() * by));
 return out;
   }
+
+  public static Resource multiplyAndRoundUp(Resource lhs, double by) {
+Resource out = clone(lhs);
+out.setMemorySize((long)Math.ceil(lhs.getMemorySize() * by));
+out.setVirtualCores((int)Math.ceil(lhs.getVirtualCores() * by));
+return out;
+  }
   
   public static Resource normalize(
   ResourceCalculator calculator, Resource lhs, Resource min,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4218671f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index 057214b..f8570a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.util.resource;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestResources {
@@ -46,5 +48,25 @@ public class TestResources {
 assertTrue(Resources.none().compareTo(
 createResource(0, 1)) < 0);
   }
-  
+
+  @Test
+  public void testMultipleRoundUp() {
+final double by = 0.5;
+final String memoryErrorMsg = "Invalid memory size.";
+final String vcoreErrorMsg = "Invalid virtual core number.";
+Resource resource = Resources.createResource(1, 1);
+Resource result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(2, 2);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(0, 0);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 0);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 0);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4218671f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 

hadoop git commit: YARN-6264. AM not launched when a single vcore is available on the cluster. (Yufei Gu via kasha)

2017-03-09 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk c5ee7fded -> a96afae12


YARN-6264. AM not launched when a single vcore is available on the cluster. 
(Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a96afae1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a96afae1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a96afae1

Branch: refs/heads/trunk
Commit: a96afae125ba02fb4480542d3fb0891623ee4c37
Parents: c5ee7fde
Author: Karthik Kambatla 
Authored: Thu Mar 9 23:11:54 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Mar 9 23:11:54 2017 -0800

--
 .../hadoop/yarn/util/resource/Resources.java|  7 +
 .../yarn/util/resource/TestResources.java   | 24 -
 .../scheduler/fair/FSLeafQueue.java |  3 ++-
 .../scheduler/fair/TestFairScheduler.java   | 28 ++--
 4 files changed, 46 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 57b3a46..7020300 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -242,6 +242,13 @@ public class Resources {
 out.setVirtualCores((int)(lhs.getVirtualCores() * by));
 return out;
   }
+
+  public static Resource multiplyAndRoundUp(Resource lhs, double by) {
+Resource out = clone(lhs);
+out.setMemorySize((long)Math.ceil(lhs.getMemorySize() * by));
+out.setVirtualCores((int)Math.ceil(lhs.getVirtualCores() * by));
+return out;
+  }
   
   public static Resource normalize(
   ResourceCalculator calculator, Resource lhs, Resource min,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index 057214b..f8570a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.util.resource;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestResources {
@@ -46,5 +48,25 @@ public class TestResources {
 assertTrue(Resources.none().compareTo(
 createResource(0, 1)) < 0);
   }
-  
+
+  @Test
+  public void testMultipleRoundUp() {
+final double by = 0.5;
+final String memoryErrorMsg = "Invalid memory size.";
+final String vcoreErrorMsg = "Invalid virtual core number.";
+Resource resource = Resources.createResource(1, 1);
+Resource result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(2, 2);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(0, 0);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 0);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 0);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 

hadoop git commit: HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. Contributed by John Zhuge.

2017-03-09 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 518705b8e -> 843fa685a


HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. 
Contributed by John Zhuge.

Change-Id: Ic956e2eb8189625916442eaffdc69163d32f730e
(cherry picked from commit c5ee7fded46dcb1ac1ea4c1ada4949c50bc89afb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/843fa685
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/843fa685
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/843fa685

Branch: refs/heads/branch-2.8
Commit: 843fa685a613df7d94782c3d623644a0c935aca4
Parents: 518705b
Author: John Zhuge 
Authored: Thu Mar 9 19:54:19 2017 -0800
Committer: John Zhuge 
Committed: Thu Mar 9 19:54:19 2017 -0800

--
 .../META-INF/org.apache.hadoop.fs.FileSystem| 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/843fa685/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 7ec7812..0000000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.adl.AdlFileSystem
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. Contributed by John Zhuge.

2017-03-09 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 1aa110fc7 -> 57ad66a6a


HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. 
Contributed by John Zhuge.

Change-Id: Ic956e2eb8189625916442eaffdc69163d32f730e
(cherry picked from commit c5ee7fded46dcb1ac1ea4c1ada4949c50bc89afb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57ad66a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57ad66a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57ad66a6

Branch: refs/heads/branch-2.8.0
Commit: 57ad66a6a7fb7a2fc951ef69dc2a23b6db2522df
Parents: 1aa110f
Author: John Zhuge 
Authored: Thu Mar 9 19:54:36 2017 -0800
Committer: John Zhuge 
Committed: Thu Mar 9 19:54:36 2017 -0800

--
 .../META-INF/org.apache.hadoop.fs.FileSystem| 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57ad66a6/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 7ec7812..0000000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.adl.AdlFileSystem
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. Contributed by John Zhuge.

2017-03-09 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c60cd88ad -> f298b992f


HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. 
Contributed by John Zhuge.

Change-Id: Ic956e2eb8189625916442eaffdc69163d32f730e
(cherry picked from commit c5ee7fded46dcb1ac1ea4c1ada4949c50bc89afb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f298b992
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f298b992
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f298b992

Branch: refs/heads/branch-2
Commit: f298b992f4ede27a2c15857fa45ccf70ade2218b
Parents: c60cd88
Author: John Zhuge 
Authored: Thu Mar 9 19:53:23 2017 -0800
Committer: John Zhuge 
Committed: Thu Mar 9 19:53:23 2017 -0800

--
 .../META-INF/org.apache.hadoop.fs.FileSystem| 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f298b992/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 7ec7812..0000000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.adl.AdlFileSystem
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. Contributed by John Zhuge.

2017-03-09 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 846a0cd67 -> c5ee7fded


HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. 
Contributed by John Zhuge.

Change-Id: Ic956e2eb8189625916442eaffdc69163d32f730e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5ee7fde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5ee7fde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5ee7fde

Branch: refs/heads/trunk
Commit: c5ee7fded46dcb1ac1ea4c1ada4949c50bc89afb
Parents: 846a0cd
Author: John Zhuge 
Authored: Sun Mar 5 22:34:22 2017 -0800
Committer: John Zhuge 
Committed: Thu Mar 9 18:30:17 2017 -0800

--
 .../META-INF/org.apache.hadoop.fs.FileSystem| 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5ee7fde/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 7ec7812..0000000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.adl.AdlFileSystem
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[1/2] hadoop git commit: YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed by Yuanbo Liu via Daniel Templeton)

2017-03-09 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3b6e5ef91 -> c60cd88ad


YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed 
by Yuanbo Liu via Daniel Templeton)

(cherry picked from commit 8bc8804be1b96f8a4fc33b5d24dd7f7af93a437e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6774f9c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6774f9c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6774f9c3

Branch: refs/heads/branch-2
Commit: 6774f9c3c253c8a6ae4010465f99461825665def
Parents: 3b6e5ef
Author: Daniel Templeton 
Authored: Thu Mar 9 12:12:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 17:53:24 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6774f9c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index bf208e1..7b31535 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -96,7 +96,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdate
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 
 
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -119,7 +118,6 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 
@@ -130,8 +128,6 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   private final int GB = 1024;
   private final static String ALLOC_FILE =
   new File(TEST_DIR, "test-queues").getAbsolutePath();
-  private final static ContainerUpdates NULL_UPDATE_REQUESTS =
-  new ContainerUpdates();
 
   @Before
   public void setUp() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[2/2] hadoop git commit: YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik Kambatla via Daniel Templeton)

2017-03-09 Thread templedf
YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik 
Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c60cd88a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c60cd88a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c60cd88a

Branch: refs/heads/branch-2
Commit: c60cd88ad10f761280d7f659bede9c593958861b
Parents: 6774f9c
Author: Daniel Templeton 
Authored: Thu Mar 9 18:18:03 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 18:18:03 2017 -0800

--
 .../server/resourcemanager/scheduler/QueueMetrics.java | 13 +
 .../resourcemanager/scheduler/fair/FSAppAttempt.java   |  5 +
 .../scheduler/fair/TestFairScheduler.java  |  1 +
 .../scheduler/fair/TestFairSchedulerPreemption.java| 12 +---
 4 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c60cd88a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 4e364f7..007d2b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -71,6 +71,8 @@ public class QueueMetrics implements MetricsSource {
   @Metric("Aggregate # of allocated off-switch containers")
 MutableCounterLong aggregateOffSwitchContainersAllocated;
   @Metric("Aggregate # of released containers") MutableCounterLong 
aggregateContainersReleased;
+  @Metric("Aggregate # of preempted containers") MutableCounterLong
+  aggregateContainersPreempted;
   @Metric("Available memory in MB") MutableGaugeLong availableMB;
   @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
   @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB;
@@ -476,6 +478,13 @@ public class QueueMetrics implements MetricsSource {
 }
   }
 
+  public void preemptContainer() {
+aggregateContainersPreempted.incr();
+if (parent != null) {
+  parent.preemptContainer();
+}
+  }
+
   public void reserveResource(String user, Resource res) {
 reservedContainers.incr();
 reservedMB.incr(res.getMemorySize());
@@ -640,4 +649,8 @@ public class QueueMetrics implements MetricsSource {
   public long getAggegatedReleasedContainers() {
 return aggregateContainersReleased.value();
   }
+
+  public long getAggregatePreemptedContainers() {
+return aggregateContainersPreempted.value();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c60cd88a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 5c6836b..60902a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -55,6 +55,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import 

hadoop git commit: YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik Kambatla via Daniel Templeton)

2017-03-09 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 819808a01 -> 846a0cd67


YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik 
Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/846a0cd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/846a0cd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/846a0cd6

Branch: refs/heads/trunk
Commit: 846a0cd678fba743220f28cef844ac9011a3f934
Parents: 819808a
Author: Daniel Templeton 
Authored: Thu Mar 9 17:51:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 17:51:47 2017 -0800

--
 .../server/resourcemanager/scheduler/QueueMetrics.java | 13 +
 .../resourcemanager/scheduler/fair/FSAppAttempt.java   |  5 +
 .../scheduler/fair/TestFairSchedulerPreemption.java| 12 +---
 3 files changed, 27 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/846a0cd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 4e364f7..007d2b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -71,6 +71,8 @@ public class QueueMetrics implements MetricsSource {
   @Metric("Aggregate # of allocated off-switch containers")
 MutableCounterLong aggregateOffSwitchContainersAllocated;
   @Metric("Aggregate # of released containers") MutableCounterLong 
aggregateContainersReleased;
+  @Metric("Aggregate # of preempted containers") MutableCounterLong
+  aggregateContainersPreempted;
   @Metric("Available memory in MB") MutableGaugeLong availableMB;
   @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
   @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB;
@@ -476,6 +478,13 @@ public class QueueMetrics implements MetricsSource {
 }
   }
 
+  public void preemptContainer() {
+aggregateContainersPreempted.incr();
+if (parent != null) {
+  parent.preemptContainer();
+}
+  }
+
   public void reserveResource(String user, Resource res) {
 reservedContainers.incr();
 reservedMB.incr(res.getMemorySize());
@@ -640,4 +649,8 @@ public class QueueMetrics implements MetricsSource {
   public long getAggegatedReleasedContainers() {
 return aggregateContainersReleased.value();
   }
+
+  public long getAggregatePreemptedContainers() {
+return aggregateContainersPreempted.value();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/846a0cd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6c61b45..3a9c94e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 

[2/2] hadoop git commit: HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

2017-03-09 Thread wang
HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test 
code. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/819808a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/819808a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/819808a0

Branch: refs/heads/trunk
Commit: 819808a016e16325502169e0091a16a6b2ae5387
Parents: e96a0b8
Author: Andrew Wang 
Authored: Thu Mar 9 17:29:11 2017 -0800
Committer: Andrew Wang 
Committed: Thu Mar 9 17:29:11 2017 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 10 --
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hdfs/ErasureCodeBenchmarkThroughput.java|  5 +--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  3 +-
 .../hdfs/TestDecommissionWithStriped.java   |  5 ++-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  8 ++---
 .../TestErasureCodingPolicyWithSnapshot.java|  3 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java|  5 ++-
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java |  3 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |  3 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   |  5 ++-
 .../TestReadStripedFileWithMissingBlocks.java   |  3 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  7 ++---
 .../hdfs/TestSafeModeWithStripedFile.java   |  5 ++-
 .../TestUnsetAndChangeDirectoryEcPolicy.java|  3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  5 ++-
 .../hdfs/TestWriteStripedFileWithFailure.java   |  5 ++-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 12 +++
 .../hdfs/server/balancer/TestBalancer.java  |  5 ++-
 .../blockmanagement/TestBlockInfoStriped.java   |  4 +--
 .../TestBlockTokenWithDFSStriped.java   |  6 ++--
 .../TestLowRedundancyBlockQueues.java   |  4 +--
 ...constructStripedBlocksWithRackAwareness.java | 10 +++---
 .../TestSequentialBlockGroupId.java |  6 ++--
 .../TestSortLocatedStripedBlock.java|  4 +--
 .../hdfs/server/datanode/TestBlockRecovery.java |  3 +-
 .../TestDataNodeErasureCodingMetrics.java   |  5 ++-
 .../hadoop/hdfs/server/mover/TestMover.java |  5 ++-
 .../TestAddOverReplicatedStripedBlocks.java |  6 ++--
 .../namenode/TestAddStripedBlockInFBR.java  |  5 +--
 .../server/namenode/TestAddStripedBlocks.java   |  7 +++--
 .../server/namenode/TestEnabledECPolicies.java  | 12 +++
 .../server/namenode/TestFSEditLogLoader.java|  3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 33 +---
 .../server/namenode/TestNameNodeMXBean.java | 12 +++
 .../namenode/TestQuotaWithStripedBlocks.java|  3 +-
 .../namenode/TestReconstructStripedBlocks.java  |  6 ++--
 .../server/namenode/TestStripedINodeFile.java   |  5 +--
 ...TestOfflineImageViewerWithStripedBlocks.java |  8 ++---
 42 files changed, 121 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 02cbbdf..29af207 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -130,16 +130,6 @@ public final class ErasureCodingPolicyManager {
   }
 
   /**
-   * Get system-wide default policy, which can be used by default
-   * when no policy is specified for a path.
-   * @return ecPolicy
-   */
-  public static ErasureCodingPolicy getSystemDefaultPolicy() {
-// make this configurable?
-return SYS_POLICY1;
-  }
-
-  /**
* Get a policy by policy ID.
* @return ecPolicy, or null if not found
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7bf5cdc..1329195 100644
--- 

[1/2] hadoop git commit: HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

2017-03-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk e96a0b8c9 -> 819808a01


http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index e7794d6..0bfa054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -46,7 +46,7 @@ import org.junit.Test;
 
 public class TestOfflineImageViewerWithStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
-  ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  StripedFileTestUtil.getDefaultECPolicy();
   private int dataBlocks = ecPolicy.getNumDataUnits();
   private int parityBlocks = ecPolicy.getNumParityUnits();
 
@@ -64,7 +64,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
 cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
-ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+StripedFileTestUtil.getDefaultECPolicy().getName());
 fs = cluster.getFileSystem();
 Path eczone = new Path("/eczone");
 fs.mkdirs(eczone);
@@ -144,7 +144,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 // Verify space consumed present in BlockInfoStriped
 FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
 INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
+assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
 fileNode.getErasureCodingPolicyID());
 assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
 long actualFileSize = 0;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/33] hadoop git commit: YARN-5669. Add support for docker pull command (Contributed by luhuichun)

2017-03-09 Thread jhung
YARN-5669. Add support for docker pull command (Contributed by luhuichun)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e96a0b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e96a0b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e96a0b8c

Branch: refs/heads/YARN-5734
Commit: e96a0b8c92b46aed7c1f5ccec13abc6c1043edba
Parents: 822a74f
Author: Sidharta S 
Authored: Thu Mar 9 16:22:19 2017 -0800
Committer: Sidharta S 
Committed: Thu Mar 9 16:22:19 2017 -0800

--
 .../linux/runtime/docker/DockerPullCommand.java | 31 +
 .../runtime/docker/TestDockerPullCommand.java   | 49 
 2 files changed, 80 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e96a0b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
new file mode 100644
index 000..351e09e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+/**
+ * Encapsulates the docker pull command and its command
+ * line arguments.
+ */
+public class DockerPullCommand extends DockerCommand {
+  private static final String PULL_COMMAND = "pull";
+
+  public DockerPullCommand(String imageName) {
+super(PULL_COMMAND);
+super.addCommandArguments(imageName);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e96a0b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
new file mode 100644
index 000..89157ff
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing 

[28/33] hadoop git commit: HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by Lukas Majercak and Manoj Govindassamy.

2017-03-09 Thread jhung
HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by 
Lukas Majercak and Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/385d2cb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/385d2cb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/385d2cb7

Branch: refs/heads/YARN-5734
Commit: 385d2cb777a0272ac20c62336c944fad295d5d12
Parents: 570827a
Author: Masatake Iwasaki 
Authored: Thu Mar 9 13:30:33 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Mar 9 21:13:50 2017 +0900

--
 .../server/blockmanagement/BlockManager.java| 10 +++-
 .../apache/hadoop/hdfs/TestDecommission.java| 48 ++
 .../hadoop/hdfs/TestMaintenanceState.java   | 51 
 3 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d2cb7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9ec28f9..5dc40fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -891,7 +891,15 @@ public class BlockManager implements BlockStatsMXBean {
   lastBlock.getUnderConstructionFeature()
   .updateStorageScheduledSize((BlockInfoStriped) lastBlock);
 }
-if (hasMinStorage(lastBlock)) {
+
+// Count replicas on decommissioning nodes, as these will not be
+// decommissioned unless recovery/completing last block has finished
+NumberReplicas numReplicas = countNodes(lastBlock);
+int numUsableReplicas = numReplicas.liveReplicas() +
+numReplicas.decommissioning() +
+numReplicas.liveEnteringMaintenanceReplicas();
+
+if (hasMinStorage(lastBlock, numUsableReplicas)) {
   if (committed) {
 addExpectedReplicasToPending(lastBlock);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d2cb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 94e8946..dc0edcc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -33,6 +33,7 @@ import java.util.concurrent.ExecutionException;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -646,6 +647,53 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 fdos.close();
   }
+
+  @Test(timeout = 36)
+  public void testDecommissionWithOpenFileAndBlockRecovery()
+  throws IOException, InterruptedException {
+startCluster(1, 6);
+getCluster().waitActive();
+
+Path file = new Path("/testRecoveryDecommission");
+
+// Create a file and never close the output stream to trigger recovery
+DistributedFileSystem dfs = getCluster().getFileSystem();
+FSDataOutputStream out = dfs.create(file, true,
+getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 
4096),
+(short) 3, blockSize);
+
+// Write data to the file
+long writtenBytes = 0;
+while (writtenBytes < fileSize) {
+  out.writeLong(writtenBytes);
+  writtenBytes += 8;
+}
+out.hsync();
+
+DatanodeInfo[] lastBlockLocations = NameNodeAdapter.getBlockLocations(
+  getCluster().getNameNode(), "/testRecoveryDecommission", 0, fileSize)
+  .getLastLocatedBlock().getLocations();
+
+// Decommission all nodes of the last block
+ArrayList toDecom = new ArrayList<>();
+for (DatanodeInfo dnDecom : lastBlockLocations) {
+  toDecom.add(dnDecom.getXferAddr());
+}
+initExcludeHosts(toDecom);
+refreshNodes(0);
+
+// Make sure hard lease expires to trigger replica recovery
+getCluster().setLeasePeriod(300L, 300L);
+

[05/33] hadoop git commit: HADOOP-14048. REDO operation of WASB#AtomicRename should create placeholder blob for destination folder. Contributed by NITIN VERMA

2017-03-09 Thread jhung
HADOOP-14048. REDO operation of WASB#AtomicRename should create placeholder 
blob for destination folder. Contributed by NITIN VERMA


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c571cda5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c571cda5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c571cda5

Branch: refs/heads/YARN-5734
Commit: c571cda5c7d929477961dfff4176d7de4944d874
Parents: b5adc5c
Author: Mingliang Liu 
Authored: Mon Mar 6 16:53:30 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 6 17:00:13 2017 -0800

--
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c571cda5/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 0dfefaf..b1956a7 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -566,6 +566,16 @@ public class NativeAzureFileSystem extends FileSystem {
 // Remove the source folder. Don't check explicitly if it exists,
 // to avoid triggering redo recursively.
 try {
+  // Rename the source folder 0-byte root file
+  // as destination folder 0-byte root file.
+  FileMetadata srcMetaData = this.getSourceMetadata();
+  if (srcMetaData.getBlobMaterialization() == 
BlobMaterialization.Explicit) {
+// We already have a lease. So let's just rename the source blob
+// as destination blob under same lease.
+fs.getStoreInterface().rename(this.getSrcKey(), this.getDstKey(), 
false, lease);
+  }
+
+  // Now we can safely delete the source folder.
   fs.getStoreInterface().delete(srcKey, lease);
 } catch (Exception e) {
   LOG.info("Unable to delete source folder during folder rename redo. "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/33] hadoop git commit: HADOOP-14052. Fix dead link in KMS document. Contributed by Christina Vu.

2017-03-09 Thread jhung
HADOOP-14052. Fix dead link in KMS document. Contributed by Christina Vu.

Change-Id: I7093f443d93927184196f62f02cc106a2c89e9cf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/570827a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/570827a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/570827a8

Branch: refs/heads/YARN-5734
Commit: 570827a819c586b31e88621a9bb1d8118d3c7df3
Parents: 33a38a5
Author: John Zhuge 
Authored: Wed Mar 8 23:50:15 2017 -0800
Committer: John Zhuge 
Committed: Wed Mar 8 23:50:15 2017 -0800

--
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/570827a8/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index c1f9b13..4573b06 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -956,7 +956,7 @@ $H4 Re-encrypt Encrypted Key With The Latest KeyVersion
 
 This command takes a previously generated encrypted key, and re-encrypts it 
using the latest KeyVersion encryption key in the KeyProvider. If the latest 
KeyVersion is the same as the one used to generate the encrypted key, the same 
encrypted key is returned.
 
-This is usually useful after a [Rollover](Rollover_Key) of an encryption key. 
Re-encrypting the encrypted key will allow it to be encrypted using the latest 
version of the encryption key, but still with the same key material and 
initialization vector.
+This is usually useful after a [Rollover](#Rollover_Key) of an encryption key. 
Re-encrypting the encrypted key will allow it to be encrypted using the latest 
version of the encryption key, but still with the same key material and 
initialization vector.
 
 *REQUEST:*
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[10/33] hadoop git commit: HADOOP-14087. S3A typo in pom.xml test exclusions. Contributed by Aaron Fabbri.

2017-03-09 Thread jhung
HADOOP-14087. S3A typo in pom.xml test exclusions. Contributed by Aaron Fabbri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f597f4c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f597f4c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f597f4c4

Branch: refs/heads/YARN-5734
Commit: f597f4c43e0a6e2304b9bcaf727d6d8d15a365f9
Parents: f01a69f
Author: Akira Ajisaka 
Authored: Tue Mar 7 15:14:55 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 15:14:55 2017 +0900

--
 hadoop-tools/hadoop-aws/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f597f4c4/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index e5bbbfd..c188055 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -184,7 +184,7 @@
 **/ITest*Root*.java
 **/ITestS3AFileContextStatistics.java
 **/ITestS3AEncryptionSSE*.java
-**/ITestS3AHuge*.java
+**/ITestS3AHuge*.java
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/33] hadoop git commit: HDFS-11314. Enforce set of enabled EC policies on the NameNode.

2017-03-09 Thread jhung
HDFS-11314. Enforce set of enabled EC policies on the NameNode.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33a38a53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33a38a53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33a38a53

Branch: refs/heads/YARN-5734
Commit: 33a38a534110de454662256545a7f4c075d328c8
Parents: 5ca6ef0
Author: Andrew Wang 
Authored: Wed Mar 8 16:41:44 2017 -0800
Committer: Andrew Wang 
Committed: Wed Mar 8 16:41:44 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../namenode/ErasureCodingPolicyManager.java|  97 
 .../server/namenode/FSDirErasureCodingOp.java   |  54 +++
 .../server/namenode/FSImageFormatPBINode.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   4 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  24 ++-
 .../src/main/resources/hdfs-default.xml |   9 ++
 .../src/site/markdown/HDFSErasureCoding.md  |  45 +++---
 .../apache/hadoop/cli/TestErasureCodingCLI.java |   3 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  11 ++
 .../TestDFSRSDefault10x4StripedInputStream.java |   2 +-
 ...TestDFSRSDefault10x4StripedOutputStream.java |   2 +-
 ...fault10x4StripedOutputStreamWithFailure.java |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   2 +
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   1 +
 .../TestDFSStripedOutputStreamWithFailure.java  |   1 +
 .../hdfs/TestDFSXORStripedInputStream.java  |   2 +-
 .../hdfs/TestDFSXORStripedOutputStream.java |   2 +-
 ...estDFSXORStripedOutputStreamWithFailure.java |   2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  41 -
 .../TestUnsetAndChangeDirectoryEcPolicy.java|   5 +-
 .../server/namenode/TestEnabledECPolicies.java  | 151 +++
 .../hdfs/server/namenode/TestFSImage.java   |   9 +-
 .../TestOfflineImageViewer.java |  11 +-
 .../test/resources/testErasureCodingConf.xml|  21 ++-
 26 files changed, 399 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a38a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 82d6073..3fc4980 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -562,6 +562,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT =
   "10m";
 
+  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = 
"dfs.namenode.ec.policies.enabled";
+  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = 
"RS-6-3-64k";
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.buffer.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a38a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index a1b2270..02cbbdf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -18,12 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 
+import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
+import 

[29/33] hadoop git commit: YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed by Yuanbo Liu via Daniel Templeton)

2017-03-09 Thread jhung
YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed 
by Yuanbo Liu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/822a74f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/822a74f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/822a74f2

Branch: refs/heads/YARN-5734
Commit: 822a74f2ae955ea0893cc02fb36ceb49ceba8014
Parents: 385d2cb
Author: Daniel Templeton 
Authored: Thu Mar 9 12:12:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 12:14:33 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/822a74f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 31dd7fe..028eea6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -96,7 +96,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdate
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 
 
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -119,7 +118,6 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 
@@ -130,8 +128,6 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   private final int GB = 1024;
   private final static String ALLOC_FILE =
   new File(TEST_DIR, "test-queues").getAbsolutePath();
-  private final static ContainerUpdates NULL_UPDATE_REQUESTS =
-  new ContainerUpdates();
 
   @Before
   public void setUp() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/33] hadoop git commit: MAPREDUCE-6855. Specify charset when creating String in CredentialsTestJob. Contributed by Kai Sasaki.

2017-03-09 Thread jhung
MAPREDUCE-6855. Specify charset when creating String in CredentialsTestJob. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14413989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14413989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14413989

Branch: refs/heads/YARN-5734
Commit: 14413989cac9acc1fa6f8d330fac32f772613325
Parents: 6868235
Author: Akira Ajisaka 
Authored: Tue Mar 7 13:10:59 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 13:10:59 2017 +0900

--
 .../org/apache/hadoop/mapreduce/security/CredentialsTestJob.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14413989/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
index e66fb2f..755e2df 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.security;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 
 
 import org.apache.hadoop.conf.Configuration;
@@ -64,7 +65,7 @@ public class CredentialsTestJob extends Configured implements 
Tool {
 // fail the test
   }
 
-  String secretValueStr = new String (secretValue);
+  String secretValueStr = new String (secretValue, StandardCharsets.UTF_8);
   System.out.println(secretValueStr);
 
   if  ( !("password"+i).equals(secretValueStr)){


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/33] hadoop git commit: HDFS-11152. Start erasure coding policy ID number from 1 instead of 0 to avoid potential unexpected errors. Contributed by SammiChen.

2017-03-09 Thread jhung
HDFS-11152. Start erasure coding policy ID number from 1 instead of 0 to avoid 
potential unexpected errors. Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5addacb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5addacb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5addacb1

Branch: refs/heads/YARN-5734
Commit: 5addacb1e301991a8285a221c726f66330cd6d08
Parents: 4ebe8a6
Author: Andrew Wang 
Authored: Wed Mar 8 08:47:38 2017 -0800
Committer: Andrew Wang 
Committed: Wed Mar 8 08:47:38 2017 -0800

--
 .../org/apache/hadoop/hdfs/protocol/HdfsConstants.java| 10 +-
 .../hadoop/hdfs/server/namenode/FSImageFormatPBINode.java |  3 +++
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5addacb1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index a9f1839..d2209a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -144,11 +144,11 @@ public final class HdfsConstants {
 ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
   }
 
-  public static final byte RS_6_3_POLICY_ID = 0;
-  public static final byte RS_3_2_POLICY_ID = 1;
-  public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
-  public static final byte XOR_2_1_POLICY_ID = 3;
-  public static final byte RS_10_4_POLICY_ID = 4;
+  public static final byte RS_6_3_POLICY_ID = 1;
+  public static final byte RS_3_2_POLICY_ID = 2;
+  public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
+  public static final byte XOR_2_1_POLICY_ID = 4;
+  public static final byte RS_10_4_POLICY_ID = 5;
 
   /* Hidden constructor */
   protected HdfsConstants() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5addacb1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 0ceae78..17b1da7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -342,6 +342,9 @@ public final class FSImageFormatPBINode {
   for (int i = 0; i < bp.size(); ++i) {
 BlockProto b = bp.get(i);
 if (isStriped) {
+  Preconditions.checkState(ecPolicy.getId() > 0,
+  "File with ID " + n.getId() +
+  " has an invalid erasure coding policy ID " + ecPolicy.getId());
   blocks[i] = new BlockInfoStriped(PBHelperClient.convert(b), 
ecPolicy);
 } else {
   blocks[i] = new BlockInfoContiguous(PBHelperClient.convert(b),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/33] hadoop git commit: YARN-6287. RMCriticalThreadUncaughtExceptionHandler.rmContext should be final (Contributed by Corey Barker via Daniel Templeton)

2017-03-09 Thread jhung
YARN-6287. RMCriticalThreadUncaughtExceptionHandler.rmContext should be final 
(Contributed by Corey Barker via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0c239cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0c239cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0c239cd

Branch: refs/heads/YARN-5734
Commit: e0c239cdbda336e09a35d112d451c2e17d74a3fc
Parents: 1f9848d
Author: Daniel Templeton 
Authored: Tue Mar 7 11:58:48 2017 -0800
Committer: Daniel Templeton 
Committed: Tue Mar 7 11:58:48 2017 -0800

--
 .../resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c239cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
index c5c6087..a67f81a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
@@ -37,7 +37,7 @@ public class RMCriticalThreadUncaughtExceptionHandler
 implements UncaughtExceptionHandler {
   private static final Log LOG = LogFactory.getLog(
   RMCriticalThreadUncaughtExceptionHandler.class);
-  private RMContext rmContext;
+  private final RMContext rmContext;
 
   public RMCriticalThreadUncaughtExceptionHandler(RMContext rmContext) {
 this.rmContext = rmContext;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/33] hadoop git commit: YARN-6275. Fail to show real-time tracking charts in SLS (yufeigu via rkanter)

2017-03-09 Thread jhung
YARN-6275. Fail to show real-time tracking charts in SLS (yufeigu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1598fd3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1598fd3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1598fd3b

Branch: refs/heads/YARN-5734
Commit: 1598fd3b7948b3592775e3be3227c4a336122bc9
Parents: 38d75df
Author: Robert Kanter 
Authored: Tue Mar 7 13:47:52 2017 -0800
Committer: Robert Kanter 
Committed: Tue Mar 7 13:47:52 2017 -0800

--
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh   | 8 ++--
 .../main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java  | 2 ++
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1598fd3b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
--
diff --git a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh 
b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
index 19b5c34..fb53045 100644
--- a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
+++ b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
@@ -103,12 +103,16 @@ function run_simulation() {
   hadoop_java_exec sls org.apache.hadoop.yarn.sls.SLSRunner ${args}
 }
 
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# copy 'html' directory to current directory to make sure web sever can access
+cp -r "${bin}/../html" "$(pwd)"
+
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
   HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
-  this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
   HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../../../../../libexec"
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1598fd3b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
index abdf106..33d4846 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.scheduler.FairSchedulerMetrics;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerMetrics;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper;
+import org.eclipse.jetty.http.MimeTypes;
 import org.eclipse.jetty.server.Handler;
 import org.eclipse.jetty.server.Request;
 import org.eclipse.jetty.server.Server;
@@ -118,6 +119,7 @@ public class SLSWebApp extends HttpServlet {
   public void start() throws Exception {
 // static files
 final ResourceHandler staticHandler = new ResourceHandler();
+staticHandler.setMimeTypes(new MimeTypes());
 staticHandler.setResourceBase("html");
 
 Handler handler = new AbstractHandler() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/33] hadoop git commit: YARN-6207. Move application across queues should handle delayed event processing. Contributed by Bibin A Chundatt.

2017-03-09 Thread jhung
YARN-6207. Move application across queues should handle delayed event 
processing. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1eb81867
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1eb81867
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1eb81867

Branch: refs/heads/YARN-5734
Commit: 1eb81867032b016a59662043cbae50daa52dafa9
Parents: 28daaf0
Author: Sunil G 
Authored: Wed Mar 8 12:04:30 2017 +0530
Committer: Sunil G 
Committed: Wed Mar 8 12:04:30 2017 +0530

--
 .../scheduler/SchedulerApplicationAttempt.java  |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  69 ---
 .../capacity/TestCapacityScheduler.java | 200 +++
 3 files changed, 248 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eb81867/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index f894a40..91e29d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1069,6 +1069,7 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
   QueueMetrics newMetrics = newQueue.getMetrics();
   String newQueueName = newQueue.getQueueName();
   String user = getUser();
+
   for (RMContainer liveContainer : liveContainers.values()) {
 Resource resource = liveContainer.getContainer().getResource();
 ((RMContainerImpl) liveContainer).setQueueName(newQueueName);
@@ -1084,7 +1085,9 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 }
   }
 
-  appSchedulingInfo.move(newQueue);
+  if (!isStopped) {
+appSchedulingInfo.move(newQueue);
+  }
   this.queue = newQueue;
 } finally {
   writeLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eb81867/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 20ea607..f6e7942 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1939,36 +1939,47 @@ public class CapacityScheduler extends
   String targetQueueName) throws YarnException {
 try {
   writeLock.lock();
-  FiCaSchedulerApp app = getApplicationAttempt(
-  ApplicationAttemptId.newInstance(appId, 0));
-  String sourceQueueName = app.getQueue().getQueueName();
-  LeafQueue source = this.queueManager.getAndCheckLeafQueue(
-  sourceQueueName);
+  SchedulerApplication application =
+  applications.get(appId);
+  if (application == null) {
+throw new YarnException("App to be moved " + appId + " not found.");
+  }
+  String sourceQueueName = application.getQueue().getQueueName();
+  LeafQueue source =
+  this.queueManager.getAndCheckLeafQueue(sourceQueueName);
   String destQueueName = handleMoveToPlanQueue(targetQueueName);
   LeafQueue dest = this.queueManager.getAndCheckLeafQueue(destQueueName);
 
-  

[12/33] hadoop git commit: HDFS-11508. Fix bind failure in SimpleTCPServer & Portmap where bind fails because socket is in TIME_WAIT state. Contributed by Mukul Kumar Singh.

2017-03-09 Thread jhung
HDFS-11508. Fix bind failure in SimpleTCPServer & Portmap where bind fails 
because socket is in TIME_WAIT state. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f9848df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f9848df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f9848df

Branch: refs/heads/YARN-5734
Commit: 1f9848dfe1fc9148cbbcfcc3dfed948b9e0f3c3c
Parents: 959940b
Author: Arpit Agarwal 
Authored: Tue Mar 7 11:41:05 2017 -0800
Committer: Arpit Agarwal 
Committed: Tue Mar 7 11:41:05 2017 -0800

--
 .../src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java| 1 +
 .../src/main/java/org/apache/hadoop/portmap/Portmap.java   | 2 ++
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9848df/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index 99d1d6f..f7ab52e 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -81,6 +81,7 @@ public class SimpleTcpServer {
 });
 server.setOption("child.tcpNoDelay", true);
 server.setOption("child.keepAlive", true);
+server.setOption("reuseAddress", true);
 
 // Listen to TCP port
 ch = server.bind(new InetSocketAddress(port));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9848df/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 2b88791..94d76d0 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -109,12 +109,14 @@ final class Portmap {
 RpcUtil.STAGE_RPC_TCP_RESPONSE);
   }
 });
+tcpServer.setOption("reuseAddress", true);
 
 udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
 Executors.newCachedThreadPool()));
 
 udpServer.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER,
 handler, RpcUtil.STAGE_RPC_UDP_RESPONSE));
+udpServer.setOption("reuseAddress", true);
 
 tcpChannel = tcpServer.bind(tcpAddress);
 udpChannel = udpServer.bind(udpAddress);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/33] hadoop git commit: YARN-6165. Intra-queue preemption occurs even when preemption is turned off for a specific queue. Contributed by Eric Payne

2017-03-09 Thread jhung
YARN-6165. Intra-queue preemption occurs even when preemption is turned off for 
a specific queue. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7762a55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7762a55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7762a55

Branch: refs/heads/YARN-5734
Commit: d7762a55113a529abd6f4ecb8e6d9b0a84b56e08
Parents: 2be8947
Author: Jason Lowe 
Authored: Wed Mar 8 16:46:09 2017 -0600
Committer: Jason Lowe 
Committed: Wed Mar 8 16:46:09 2017 -0600

--
 .../capacity/IntraQueueCandidatesSelector.java  |  5 ++
 ...ionalCapacityPreemptionPolicyIntraQueue.java | 55 
 2 files changed, 60 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7762a55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 4f2b272..2890414 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -112,6 +112,11 @@ public class IntraQueueCandidatesSelector extends 
PreemptionCandidatesSelector {
   continue;
 }
 
+// Don't preempt if disabled for this queue.
+if (leafQueue.getPreemptionDisabled()) {
+  continue;
+}
+
 // 5. Calculate the resource to obtain per partition
 Map resToObtainByPartition = 
fifoPreemptionComputePlugin
 .getResourceDemandFromAppsPerQueue(queueName, partition);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7762a55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
index 19fb0d2..bf83e1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
@@ -106,6 +106,61 @@ public class 
TestProportionalCapacityPreemptionPolicyIntraQueue
   }
 
   @Test
+  public void testNoIntraQueuePreemptionWithPreemptionDisabledOnQueues()
+  throws IOException {
+/**
+ * This test has the same configuration as testSimpleIntraQueuePreemption
+ * except that preemption is disabled specifically for each queue. The
+ * purpose is to test that disabling preemption on a specific queue will
+ * avoid intra-queue preemption.
+ */
+conf.setPreemptionDisabled("root.a", true);
+conf.setPreemptionDisabled("root.b", true);
+conf.setPreemptionDisabled("root.c", true);
+conf.setPreemptionDisabled("root.d", true);
+
+String labelsConfig = "=100,true;";
+String nodesConfig = // n1 has no label
+"n1= res=100";
+String queuesConfig =
+// guaranteed,max,used,pending,reserved
+"root(=[100 100 80 120 0]);" + // root
+"-a(=[11 100 11 50 0]);" + // a
+"-b(=[40 100 38 60 0]);" + // b
+"-c(=[20 100 10 10 0]);" + // c
+

[14/33] hadoop git commit: MAPREDUCE-6839. TestRecovery.testCrashed failed (pairg via rkanter)

2017-03-09 Thread jhung
MAPREDUCE-6839. TestRecovery.testCrashed failed (pairg via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38d75dfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38d75dfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38d75dfd

Branch: refs/heads/YARN-5734
Commit: 38d75dfd3a643f8a1acd52e025a466d65065b60e
Parents: e0c239c
Author: Robert Kanter 
Authored: Tue Mar 7 13:34:46 2017 -0800
Committer: Robert Kanter 
Committed: Tue Mar 7 13:34:46 2017 -0800

--
 .../apache/hadoop/mapreduce/v2/app/TestRecovery.java| 12 +++-
 1 file changed, 3 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38d75dfd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
index 071575a..6332c5d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -159,9 +159,7 @@ public class TestRecovery {
 app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
 app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
 
-// reduces must be in NEW state
-Assert.assertEquals("Reduce Task state not correct",
-TaskState.RUNNING, reduceTask.getReport().getTaskState());
+app.waitForState(reduceTask, TaskState.RUNNING);
 
 /// Play some games with the TaskAttempts of the first task //
 //send the fail signal to the 1st map task attempt
@@ -1301,9 +1299,7 @@ public class TestRecovery {
 app.waitForState(task1Attempt2, TaskAttemptState.RUNNING);
 app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
 
-// reduces must be in NEW state
-Assert.assertEquals("Reduce Task state not correct",
-TaskState.RUNNING, reduceTask.getReport().getTaskState());
+app.waitForState(reduceTask, TaskState.RUNNING);
 
 //send the done signal to the map 1 attempt 1
 app.getContext().getEventHandler().handle(
@@ -1431,9 +1427,7 @@ public class TestRecovery {
 app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
 app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
 
-// reduces must be in NEW state
-Assert.assertEquals("Reduce Task state not correct",
-TaskState.RUNNING, reduceTask.getReport().getTaskState());
+app.waitForState(reduceTask, TaskState.RUNNING);
 
 //send the done signal to the 1st map attempt
 app.getContext().getEventHandler().handle(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/33] hadoop git commit: YARN-6297. TestAppLogAggregatorImpl.verifyFilesUploaded() should check # of files uploaded with that of files expected (haibochen via rkanter)

2017-03-09 Thread jhung
YARN-6297. TestAppLogAggregatorImpl.verifyFilesUploaded() should check # of 
files uploaded with that of files expected (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/287ba4ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/287ba4ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/287ba4ff

Branch: refs/heads/YARN-5734
Commit: 287ba4ffa66212c02e1b1edc8fca53f6368a9efc
Parents: 98142d2
Author: Robert Kanter 
Authored: Wed Mar 8 10:45:33 2017 -0800
Committer: Robert Kanter 
Committed: Wed Mar 8 10:45:33 2017 -0800

--
 .../logaggregation/TestAppLogAggregatorImpl.java | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/287ba4ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 2602d55..17d527a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -146,7 +146,7 @@ public class TestAppLogAggregatorImpl {
 
 verifyLogAggregationWithExpectedFiles2DeleteAndUpload(applicationId,
 containerId, logRententionSec, recoveredLogInitedTimeMillis,
-logFiles, new HashSet());
+logFiles, logFiles);
   }
 
   @Test
@@ -170,7 +170,7 @@ public class TestAppLogAggregatorImpl {
 
 final long week = 7 * 24 * 60 * 60;
 final long recoveredLogInitedTimeMillis = System.currentTimeMillis() -
-2*week;
+2 * week * 1000;
 verifyLogAggregationWithExpectedFiles2DeleteAndUpload(
 applicationId, containerId, week, recoveredLogInitedTimeMillis,
 logFiles, new HashSet());
@@ -257,7 +257,7 @@ public class TestAppLogAggregatorImpl {
   Set filesExpected) {
 final String errMsgPrefix = "The set of files uploaded are not the same " +
 "as expected";
-if(filesUploaded.size() != filesUploaded.size()) {
+if(filesUploaded.size() != filesExpected.size()) {
   fail(errMsgPrefix + ": actual size: " + filesUploaded.size() + " vs " +
   "expected size: " + filesExpected.size());
 }
@@ -413,7 +413,7 @@ public class TestAppLogAggregatorImpl {
 FileContext lfs, long recoveredLogInitedTime) throws IOException {
   super(dispatcher, deletionService, conf, appId, ugi, nodeId,
   dirsHandler, remoteNodeLogFileForApp, appAcls,
-  logAggregationContext, context, lfs, recoveredLogInitedTime);
+  logAggregationContext, context, lfs, -1, recoveredLogInitedTime);
   this.applicationId = appId;
   this.deletionService = deletionService;
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/33] hadoop git commit: HDFS-10983. OIV tool should make an EC file explicit. Contributed by Manoj Govindassamy.

2017-03-09 Thread jhung
HDFS-10983. OIV tool should make an EC file explicit. Contributed by Manoj 
Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ca6ef0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ca6ef0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ca6ef0c

Branch: refs/heads/YARN-5734
Commit: 5ca6ef0c268b1acb3abf12505b9ead6fe7e38a23
Parents: d7762a5
Author: Andrew Wang 
Authored: Wed Mar 8 15:36:19 2017 -0800
Committer: Andrew Wang 
Committed: Wed Mar 8 15:36:19 2017 -0800

--
 .../server/namenode/FSImageFormatPBINode.java   |  1 +
 .../OfflineImageReconstructor.java  |  4 +
 .../offlineImageViewer/PBImageXmlWriter.java| 15 ++-
 .../hdfs/server/namenode/TestFSImage.java   |  1 +
 .../TestOfflineImageViewer.java | 99 +++-
 5 files changed, 112 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ca6ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 17b1da7..ef334f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -332,6 +332,7 @@ public final class FSImageFormatPBINode {
   BlockType blockType = PBHelperClient.convert(f.getBlockType());
   LoaderContext state = parent.getLoaderContext();
   boolean isStriped = f.hasErasureCodingPolicyID();
+  assert ((!isStriped) || (isStriped && !f.hasReplication()));
   Short replication = (!isStriped ? (short) f.getReplication() : null);
   ErasureCodingPolicy ecPolicy = isStriped ?
   ErasureCodingPolicyManager.getPolicyByPolicyID(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ca6ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index ed348d3..e80f4d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -647,6 +647,10 @@ class OfflineImageReconstructor {
 break;
   case "STRIPED":
 bld.setBlockType(HdfsProtos.BlockTypeProto.STRIPED);
+ival = node.removeChildInt(INODE_SECTION_EC_POLICY_ID);
+if (ival != null) {
+  bld.setErasureCodingPolicyID(ival);
+}
 break;
   default:
 throw new IOException("INode XML found with unknown  " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ca6ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index f8734cb..5a42a6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -40,7 +40,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheD
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
 import 

[07/33] hadoop git commit: HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by Sivaguru Sankaridurg and Dushyanth

2017-03-09 Thread jhung
HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by Sivaguru 
Sankaridurg and Dushyanth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68682352
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68682352
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68682352

Branch: refs/heads/YARN-5734
Commit: 686823529be09bea2a6cecb3503ef722017475bc
Parents: 52d7d5a
Author: Mingliang Liu 
Authored: Mon Mar 6 17:16:36 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 6 17:16:36 2017 -0800

--
 .../src/main/resources/core-default.xml |  10 +
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../fs/azure/AzureNativeFileSystemStore.java|   5 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 116 ++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 190 ++
 .../fs/azure/WasbAuthorizationException.java|  40 +++
 .../fs/azure/WasbAuthorizationOperations.java   |  44 +++
 .../fs/azure/WasbAuthorizerInterface.java   |  53 +++
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   |  71 +++-
 .../hadoop-azure/src/site/markdown/index.md |  34 ++
 .../fs/azure/AzureBlobStorageTestAccount.java   |  61 ++--
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 102 ++
 .../TestNativeAzureFileSystemAuthorization.java | 344 +++
 .../fs/azure/TestWasbRemoteCallHelper.java  | 344 +++
 .../src/test/resources/azure-test.xml   |  28 +-
 15 files changed, 1373 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68682352/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 35be56b..52b58ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1292,6 +1292,16 @@
 to specify the time (such as 2s, 2m, 1h, etc.).
   
 
+
+  fs.azure.authorization
+  false
+  
+Config flag to enable authorization support in WASB. Setting it to "true" 
enables
+authorization support to WASB. Currently WASB authorization requires a 
remote service
+to provide authorization that needs to be specified via 
fs.azure.authorization.remote.service.url
+configuration
+  
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68682352/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 966a8ac..cbfb6d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -114,6 +114,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.sas.expiry.period");
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
+xmlPropsToSkipCompare.add("fs.azure.authorization");
 
 // Deprecated properties.  These should eventually be removed from the
 // class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68682352/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 07c389c..a8708ec 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -249,7 +249,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
* Default values to control SAS Key mode.
* By default we set the values to false.
*/
-  private static final boolean DEFAULT_USE_SECURE_MODE = false;
+  public static final boolean DEFAULT_USE_SECURE_MODE = false;
   private static final boolean DEFAULT_USE_LOCAL_SAS_KEY_MODE = 

[33/33] hadoop git commit: YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store

2017-03-09 Thread jhung
YARN-5948. Implement MutableConfigurationManager for handling storage into 
configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d219f00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d219f00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d219f00

Branch: refs/heads/YARN-5734
Commit: 1d219f00ecdb074257bef21361cc3c33aa063edd
Parents: 7b3d27c
Author: Jonathan Hung 
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Jonathan Hung 
Committed: Thu Mar 9 16:45:04 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 ++
 .../src/main/resources/yarn-default.xml | 12 +++
 .../scheduler/MutableConfigurationProvider.java | 35 
 .../scheduler/capacity/CapacityScheduler.java   | 14 ++-
 .../CapacitySchedulerConfiguration.java |  3 +
 .../capacity/conf/CSConfigurationProvider.java  |  3 +-
 .../conf/MutableCSConfigurationProvider.java| 94 
 .../conf/YarnConfigurationStoreFactory.java | 46 ++
 .../TestMutableCSConfigurationProvider.java | 83 +
 9 files changed, 291 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d219f00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b366855..91728a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -602,6 +602,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
   "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+  YARN_PREFIX + "scheduler.configuration.store.class";
+  public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String DEFAULT_CONFIGURATION_STORE =
+  MEMORY_CONFIGURATION_STORE;
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
   + "authorization-provider";
   private static final List RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d219f00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 645a342..2918bd7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3068,4 +3068,16 @@
 64
   
 
+  
+
+  The type of configuration store to use for storing scheduler
+  configurations, if using a mutable configuration provider.
+  Keywords such as "memory" map to certain configuration store
+  implementations. If keyword is not found, try to load this
+  value as a class.
+
+yarn.scheduler.configuration.store.class
+memory
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d219f00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 000..da30a2b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * 

[20/33] hadoop git commit: Revert "HADOOP-13606 swift FS to add a service load metadata file. Contributed by Steve Loughran"

2017-03-09 Thread jhung
Revert "HADOOP-13606 swift FS to add a service load metadata file. Contributed 
by Steve Loughran"

This reverts commit 53a12fa721bb431f7d481aac7d245c93efb56153.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98142d2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98142d2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98142d2f

Branch: refs/heads/YARN-5734
Commit: 98142d2f722e82d57b0e2bae6276f7c17fd99598
Parents: 5addacb
Author: John Zhuge 
Authored: Mon Mar 6 11:14:33 2017 -0800
Committer: John Zhuge 
Committed: Wed Mar 8 09:54:22 2017 -0800

--
 .../src/main/resources/core-default.xml |  6 ++
 .../services/org.apache.hadoop.fs.FileSystem| 16 
 2 files changed, 6 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98142d2f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 52b58ed..f742ba8 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -850,6 +850,12 @@
 
 
 
+  fs.swift.impl
+  org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem
+  The implementation class of the OpenStack Swift 
Filesystem
+
+
+
   fs.automatic.close
   true
   By default, FileSystem instances are automatically closed at 
program

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98142d2f/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 649ea31..000
--- 
a/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/33] hadoop git commit: MAPREDUCE-6859. hadoop-mapreduce-client-jobclient.jar sets a main class that isn't in the JAR. Contributed by Daniel Templeton

2017-03-09 Thread jhung
MAPREDUCE-6859. hadoop-mapreduce-client-jobclient.jar sets a main class that 
isn't in the JAR. Contributed by Daniel Templeton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ebe8a6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ebe8a6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ebe8a6a

Branch: refs/heads/YARN-5734
Commit: 4ebe8a6a237258de9a7d8b041d78249bd3cca7a6
Parents: 1eb8186
Author: Jason Lowe 
Authored: Wed Mar 8 10:27:57 2017 -0600
Committer: Jason Lowe 
Committed: Wed Mar 8 10:27:57 2017 -0600

--
 .../hadoop-mapreduce-client-jobclient/pom.xml   | 12 +---
 1 file changed, 5 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebe8a6a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 5cecebb..1747f59 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -143,17 +143,15 @@
   
 **/hdfs-site.xml
   
+  
+
+  
org.apache.hadoop.test.MapredTestDriver
+
+  
 
 test-compile
   
 
-   
- 
-  
-   org.apache.hadoop.test.MapredTestDriver
- 
- 
-
   
   
 org.apache.maven.plugins


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/33] hadoop git commit: HDFS-10838. Last full block report received time for each DN should be easily discoverable. Contributed by Surendra Singh Lilhore.

2017-03-09 Thread jhung
HDFS-10838. Last full block report received time for each DN should be easily 
discoverable. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5adc5c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5adc5c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5adc5c3

Branch: refs/heads/YARN-5734
Commit: b5adc5c3011f111f86d232cb33ec522547f68a95
Parents: 5e74196
Author: Arpit Agarwal 
Authored: Mon Mar 6 16:39:53 2017 -0800
Committer: Arpit Agarwal 
Committed: Mon Mar 6 16:39:53 2017 -0800

--
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 52 +++-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  8 ++-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  2 +
 .../src/main/proto/hdfs.proto   |  2 +
 .../server/blockmanagement/BlockManager.java|  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  8 ++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  3 ++
 .../src/main/webapps/hdfs/dfshealth.html|  2 +
 .../src/main/webapps/hdfs/dfshealth.js  |  1 +
 .../server/namenode/TestNameNodeMXBean.java |  1 +
 10 files changed, 78 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5adc5c3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index acbcffa..e1698c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -85,6 +85,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
 
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
+  private long lastBlockReportTime;
+  private long lastBlockReportMonotonic;
 
   protected DatanodeInfo(DatanodeInfo from) {
 super(from);
@@ -101,6 +103,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.location = from.getNetworkLocation();
 this.adminState = from.getAdminState();
 this.upgradeDomain = from.getUpgradeDomain();
+this.lastBlockReportTime = from.getLastBlockReportTime();
+this.lastBlockReportMonotonic = from.getLastBlockReportMonotonic();
   }
 
   protected DatanodeInfo(DatanodeID nodeID) {
@@ -116,6 +120,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.lastUpdateMonotonic = 0L;
 this.xceiverCount = 0;
 this.adminState = null;
+this.lastBlockReportTime = 0L;
+this.lastBlockReportMonotonic = 0L;
   }
 
   protected DatanodeInfo(DatanodeID nodeID, String location) {
@@ -131,7 +137,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
   final long lastUpdate, final long lastUpdateMonotonic,
   final int xceiverCount, final String networkLocation,
-  final AdminStates adminState, final String upgradeDomain) {
+  final AdminStates adminState, final String upgradeDomain,
+  final long lastBlockReportTime, final long lastBlockReportMonotonic) {
 super(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
 ipcPort);
 this.capacity = capacity;
@@ -147,6 +154,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.location = networkLocation;
 this.adminState = adminState;
 this.upgradeDomain = upgradeDomain;
+this.lastBlockReportTime = lastBlockReportTime;
+this.lastBlockReportMonotonic = lastBlockReportMonotonic;
   }
 
   /** Network location name. */
@@ -391,6 +400,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 .append(percent2String(cacheRemainingPercent)).append("\n");
 buffer.append("Xceivers: ").append(getXceiverCount()).append("\n");
 buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
+buffer
+.append("Last Block Report: ")
+.append(
+lastBlockReportTime != 0 ? new Date(lastBlockReportTime) : "Never")
+.append("\n");
 return buffer.toString();
   }
 
@@ -503,6 +517,26 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 return this.maintenanceExpireTimeInMS;
   }
 
+  /** Sets the last block report time. */
+  public void setLastBlockReportTime(long lastBlockReportTime) {
+this.lastBlockReportTime = lastBlockReportTime;

[11/33] hadoop git commit: HDFS-11477. Simplify file IO profiling configuration. Contributed by Hanisha Koneru.

2017-03-09 Thread jhung
HDFS-11477. Simplify file IO profiling configuration. Contributed by Hanisha 
Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/959940b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/959940b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/959940b0

Branch: refs/heads/YARN-5734
Commit: 959940b0ab563b4e42bace44f1dc9a8babcaa889
Parents: f597f4c
Author: Arpit Agarwal 
Authored: Tue Mar 7 10:12:35 2017 -0800
Committer: Arpit Agarwal 
Committed: Tue Mar 7 10:12:35 2017 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md |  7 ++-
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  6 +-
 .../org/apache/hadoop/hdfs/server/common/Util.java | 17 +
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  7 ---
 .../hdfs/server/datanode/FileIoProvider.java   |  2 +-
 .../server/datanode/ProfilingFileIoEvents.java | 11 +--
 .../server/datanode/TestDataNodeVolumeMetrics.java |  4 ++--
 .../apache/hadoop/tools/TestHdfsConfigFields.java  |  2 --
 8 files changed, 36 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/959940b0/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 7900692..a8bdbeb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -332,7 +332,12 @@ Each metrics record contains tags such as SessionId and 
Hostname as additional i
 FsVolume
 
 
-Per-volume metrics contain Datanode Volume IO related statistics. Per-volume 
metrics are off by default. They can be enbabled by setting 
`dfs.datanode.enable.fileio.profiling` to **true**, but enabling per-volume 
metrics may have a performance impact. Each metrics record contains tags such 
as Hostname as additional information along with metrics.
+Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
+metrics are off by default. They can be enabled by setting `dfs.datanode
+.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
+Setting this value to 0.0 would mean profiling is not enabled. But enabling
+per-volume metrics may have a performance impact. Each metrics record
+contains tags such as Hostname as additional information along with metrics.
 
 | Name | Description |
 |: |: |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/959940b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index be20829..82d6073 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -718,10 +718,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no 
throttling
 
   // Datanode File IO Stats
-  public static final String DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY =
-  "dfs.datanode.enable.fileio.profiling";
-  public static final boolean DFS_DATANODE_ENABLE_FILEIO_PROFILING_DEFAULT =
-  false;
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
   "dfs.datanode.enable.fileio.fault.injection";
   public static final boolean
@@ -730,7 +726,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
   "dfs.datanode.fileio.profiling.sampling.fraction";
   public static final double
-  DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT = 1.0;
+  DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
 
   //Keys with no defaults
   public static final String  DFS_DATANODE_PLUGINS_KEY = 
"dfs.datanode.plugins";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/959940b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index 9c67f0a..fdb09df 100644
--- 

[16/33] hadoop git commit: HADOOP-14150. Implement getHomeDirectory() method in NativeAzureFileSystem. Contributed by Santhosh G Nayak

2017-03-09 Thread jhung
HADOOP-14150. Implement getHomeDirectory() method in NativeAzureFileSystem. 
Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28daaf0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28daaf0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28daaf0e

Branch: refs/heads/YARN-5734
Commit: 28daaf0eb206d723d2baf0f9d91e43d98bb2fd26
Parents: 1598fd3
Author: Mingliang Liu 
Authored: Tue Mar 7 14:55:52 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Mar 7 14:55:52 2017 -0800

--
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28daaf0e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 6de0a28..9aebbb5 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -84,6 +84,7 @@ import com.microsoft.azure.storage.StorageException;
 @InterfaceStability.Stable
 public class NativeAzureFileSystem extends FileSystem {
   private static final int USER_WX_PERMISION = 0300;
+  private static final String USER_HOME_DIR_PREFIX_DEFAULT = "/user";
   /**
* A description of a folder rename operation, including the source and
* destination keys, and descriptions of the files in the source folder.
@@ -1129,6 +1130,8 @@ public class NativeAzureFileSystem extends FileSystem {
*/
   private WasbAuthorizerInterface authorizer = null;
 
+  private UserGroupInformation ugi;
+
   private String delegationToken = null;
 
   public NativeAzureFileSystem() {
@@ -1247,6 +1250,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
 store.initialize(uri, conf, instrumentation);
 setConf(conf);
+this.ugi = UserGroupInformation.getCurrentUser();
 this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
 this.workingDir = new Path("/user", UserGroupInformation.getCurrentUser()
 .getShortUserName()).makeQualified(getUri(), getWorkingDirectory());
@@ -1276,6 +1280,12 @@ public class NativeAzureFileSystem extends FileSystem {
 }
   }
 
+  @Override
+  public Path getHomeDirectory() {
+return makeQualified(new Path(
+USER_HOME_DIR_PREFIX_DEFAULT + "/" + this.ugi.getShortUserName()));
+  }
+
   @VisibleForTesting
   public void updateWasbAuthorizer(WasbAuthorizerInterface authorizer) {
 this.authorizer = authorizer;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/33] hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

2017-03-09 Thread jhung
YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b3d27c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b3d27c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b3d27c7

Branch: refs/heads/YARN-5734
Commit: 7b3d27c71a7f8ffa919c861a540a55b34bafa558
Parents: 37621cc
Author: Xuan 
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Jonathan Hung 
Committed: Thu Mar 9 16:44:53 2017 -0800

--
 .../conf/InMemoryConfigurationStore.java|  86 +++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++
 .../conf/TestYarnConfigurationStore.java|  70 +
 3 files changed, 310 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b3d27c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 000..a208fb9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+this.schedConf = schedConf;
+this.pendingMutations = new LinkedList<>();
+this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+logMutation.setId(++pendingId);
+pendingMutations.add(logMutation);
+return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+LogMutation mutation = pendingMutations.poll();
+// If confirmMutation is called out of order, discard mutations until id
+// is reached.
+while (mutation != null) {
+  if (mutation.getId() == id) {
+if (isValid) {
+  Map mutations = mutation.getUpdates();
+  for (Map.Entry kv : mutations.entrySet()) {
+schedConf.set(kv.getKey(), kv.getValue());
+  }
+}
+return true;
+  }
+  mutation = pendingMutations.poll();
+}
+return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+return schedConf;
+  }
+
+  @Override
+  public synchronized List getPendingMutations() {
+return pendingMutations;
+  }
+
+  @Override
+  public List getConfirmedConfHistory(long fromId) {
+// Unimplemented.
+return null;
+  }
+}


[22/33] hadoop git commit: HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled. Contributed by Steven Rand

2017-03-09 Thread jhung
HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with 
EOFException when RPC privacy is enabled. Contributed by Steven Rand


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/241c1cc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/241c1cc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/241c1cc0

Branch: refs/heads/YARN-5734
Commit: 241c1cc05b71f8b719a85c06e3df930639630726
Parents: 287ba4f
Author: Jian He 
Authored: Wed Mar 8 10:48:27 2017 -0800
Committer: Jian He 
Committed: Wed Mar 8 10:48:27 2017 -0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  4 +++-
 .../yarn/client/api/impl/TestAMRMClient.java| 24 
 2 files changed, 27 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/241c1cc0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 70b902c..c0a5be9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1768,7 +1768,9 @@ public class Client implements AutoCloseable {
 }
 
 void setSaslClient(SaslRpcClient client) throws IOException {
-  setInputStream(client.getInputStream(in));
+  // Wrap the input stream in a BufferedInputStream to fill the buffer
+  // before reading its length (HADOOP-14062).
+  setInputStream(new BufferedInputStream(client.getInputStream(in)));
   setOutputStream(client.getOutputStream(out));
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/241c1cc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 43c0271..a52963a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -137,6 +137,11 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+createClientAndCluster(conf);
+  }
+
+  private static void createClientAndCluster(Configuration conf)
+  throws Exception {
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -862,6 +867,25 @@ public class TestAMRMClient {
   }
 
  @Test (timeout=60000)
+  public void testAMRMClientWithSaslEncryption() throws Exception {
+conf.set("hadoop.rpc.protection", "privacy");
+// we have to create a new instance of MiniYARNCluster to avoid SASL qop
+// mismatches between client and server
+tearDown();
+createClientAndCluster(conf);
+startApp();
+initAMRMClientAndTest(false);
+
+// recreate the original MiniYARNCluster and YarnClient for other tests
+conf.unset("hadoop.rpc.protection");
+tearDown();
+createClientAndCluster(conf);
+// unless we start an application the cancelApp() method will fail when
+// it runs after this test
+startApp();
+  }
+
  @Test (timeout=60000)
   public void testAMRMClientAllocReqId() throws YarnException, IOException {
 initAMRMClientAndTest(true);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/33] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

2017-03-09 Thread jhung
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37621ccd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37621ccd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37621ccd

Branch: refs/heads/YARN-5734
Commit: 37621ccd216b21788d49e863191579d9243c9526
Parents: e96a0b8
Author: Jonathan Hung 
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Jonathan Hung 
Committed: Thu Mar 9 16:44:53 2017 -0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 36 +--
 .../CapacitySchedulerConfiguration.java | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 
 .../scheduler/capacity/conf/package-info.java   | 29 +
 .../capacity/TestCapacityScheduler.java |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37621ccd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index f6e7942..ed70b0b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -104,6 +103,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -162,6 +163,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -285,7 +287,18 @@ public class CapacityScheduler extends
   IOException {
 try {
   writeLock.lock();
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  String confProviderStr = configuration.get(
+  CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+  CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+  if (confProviderStr.equals(
+  CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+  } else {
+throw new IOException("Invalid CS configuration provider: " +
+confProviderStr);
+  }
+  this.csConfProvider.init(configuration);
+  this.conf = this.csConfProvider.loadConfiguration(configuration);
   validateConf(this.conf);
   this.minimumAllocation = this.conf.getMinimumAllocation();
   initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -392,7 +405,7 @@ public class CapacityScheduler extends
   writeLock.lock();
   Configuration configuration = new Configuration(newConf);
   CapacitySchedulerConfiguration oldConf = this.conf;
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  this.conf = 

[02/33] hadoop git commit: YARN-5665. Enhance documentation for yarn.resourcemanager.scheduler.class property. (Yufei Gu via rchiang)

2017-03-09 Thread jhung
YARN-5665. Enhance documentation for yarn.resourcemanager.scheduler.class 
property. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9dc444d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9dc444d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9dc444d

Branch: refs/heads/YARN-5734
Commit: d9dc444dc73fbe23f9e553d63baf83f12c636fa7
Parents: ec839b9
Author: Ray Chiang 
Authored: Mon Mar 6 14:02:49 2017 -0800
Committer: Ray Chiang 
Committed: Mon Mar 6 14:02:49 2017 -0800

--
 .../hadoop-common/src/site/markdown/ClusterSetup.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9dc444d/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 1d9e9da..7be6a19 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -139,7 +139,7 @@ This section deals with important parameters to be 
specified in the given config
 | `yarn.resourcemanager.admin.address` | `ResourceManager` host:port for 
administrative commands. | *host:port* If set, overrides the hostname set in 
`yarn.resourcemanager.hostname`. |
 | `yarn.resourcemanager.webapp.address` | `ResourceManager` web-ui host:port. 
| *host:port* If set, overrides the hostname set in 
`yarn.resourcemanager.hostname`. |
 | `yarn.resourcemanager.hostname` | `ResourceManager` host. | *host* Single 
hostname that can be set in place of setting all `yarn.resourcemanager*address` 
resources. Results in default ports for ResourceManager components. |
-| `yarn.resourcemanager.scheduler.class` | `ResourceManager` Scheduler class. 
| `CapacityScheduler` (recommended), `FairScheduler` (also recommended), or 
`FifoScheduler` |
+| `yarn.resourcemanager.scheduler.class` | `ResourceManager` Scheduler class. 
| `CapacityScheduler` (recommended), `FairScheduler` (also recommended), or 
`FifoScheduler`. Use a fully qualified class name, e.g., 
`org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler`. |
 | `yarn.scheduler.minimum-allocation-mb` | Minimum limit of memory to allocate 
to each container request at the `Resource Manager`. | In MBs |
 | `yarn.scheduler.maximum-allocation-mb` | Maximum limit of memory to allocate 
to each container request at the `Resource Manager`. | In MBs |
 | `yarn.resourcemanager.nodes.include-path` / 
`yarn.resourcemanager.nodes.exclude-path` | List of permitted/excluded 
NodeManagers. | If necessary, use these files to control the list of allowable 
NodeManagers. |


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/33] hadoop git commit: Revert "HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by Sivaguru Sankaridurg and Dushyanth"

2017-03-09 Thread jhung
Revert "HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by 
Sivaguru Sankaridurg and Dushyanth"

This reverts commit 6b7cd62b8cf12616b13142f2eb2cfc2f25796f0f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52d7d5aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52d7d5aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52d7d5aa

Branch: refs/heads/YARN-5734
Commit: 52d7d5aa1a303cf70519a61487641211f4267c6f
Parents: c571cda
Author: Mingliang Liu 
Authored: Mon Mar 6 17:10:11 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 6 17:10:11 2017 -0800

--
 .../src/main/resources/core-default.xml |  10 -
 .../conf/TestCommonConfigurationFields.java |   2 -
 .../fs/azure/AzureNativeFileSystemStore.java|   4 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 155 +--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 183 
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 247 -
 .../fs/azure/SecureStorageInterfaceImpl.java|   6 +-
 .../fs/azure/WasbAuthorizationException.java|  40 ---
 .../fs/azure/WasbAuthorizationOperations.java   |  44 ---
 .../fs/azure/WasbAuthorizerInterface.java   |  47 
 .../hadoop/fs/azure/security/Constants.java |  54 
 .../security/WasbDelegationTokenIdentifier.java |  48 
 .../fs/azure/security/WasbTokenRenewer.java | 124 -
 .../hadoop/fs/azure/security/package.html   |  28 --
 ...apache.hadoop.security.token.TokenIdentifier |  16 --
 ...rg.apache.hadoop.security.token.TokenRenewer |  16 --
 .../hadoop-azure/src/site/markdown/index.md |  34 ---
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 102 ---
 .../TestNativeAzureFileSystemAuthorization.java | 277 ---
 19 files changed, 64 insertions(+), 1373 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d7d5aa/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 52b58ed..35be56b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1292,16 +1292,6 @@
 to specify the time (such as 2s, 2m, 1h, etc.).
   
 
-
-  fs.azure.authorization
-  false
-  
-Config flag to enable authorization support in WASB. Setting it to "true" 
enables
-authorization support to WASB. Currently WASB authorization requires a 
remote service
-to provide authorization that needs to be specified via 
fs.azure.authorization.remote.service.url
-configuration
-  
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d7d5aa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 7410d29..966a8ac 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -181,8 +181,6 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("io.compression.codec.bzip2.library");
 // - org.apache.hadoop.io.SequenceFile
 xmlPropsToSkipCompare.add("io.seqfile.local.dir");
-// - org.apache.hadoop.fs.azure.NativeAzureFileSystem
-xmlPropsToSkipCompare.add("fs.azure.authorization");
 
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d7d5aa/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 9d7ac80..07c389c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -303,7 +303,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   private 

[03/33] hadoop git commit: HDFS-11498. Make RestCsrfPreventionHandler and WebHdfsHandler compatible with Netty 4.0.

2017-03-09 Thread jhung
HDFS-11498. Make RestCsrfPreventionHandler and WebHdfsHandler compatible with 
Netty 4.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e74196e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e74196e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e74196e

Branch: refs/heads/YARN-5734
Commit: 5e74196ede9bfc20eb6d6fe3aa6a0e5c47a40fdd
Parents: d9dc444
Author: Andrew Wang 
Authored: Mon Mar 6 15:04:13 2017 -0800
Committer: Andrew Wang 
Committed: Mon Mar 6 15:04:13 2017 -0800

--
 .../web/RestCsrfPreventionFilterHandler.java|  6 ++--
 .../datanode/web/webhdfs/WebHdfsHandler.java| 33 ++--
 hadoop-project/pom.xml  |  2 +-
 3 files changed, 20 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e74196e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
index f2f0533..4958bb5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web;
 
-import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
-import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
 import static 
io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 
@@ -119,7 +119,7 @@ final class RestCsrfPreventionFilterHandler
 
 @Override
 public String getMethod() {
-  return req.method().name();
+  return req.getMethod().name();
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e74196e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index d2b2ec2..c5fc7ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,17 +29,6 @@ import io.netty.handler.codec.http.HttpMethod;
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import io.netty.handler.stream.ChunkedStream;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.charset.StandardCharsets;
-import java.security.PrivilegedExceptionAction;
-import java.util.EnumSet;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -63,17 +52,27 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.LimitInputStream;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;
+
+import static io.netty.handler.codec.http.HttpHeaders.Names.ACCEPT;
+import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
 import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
 import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
+import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
 import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
 import static 

[01/33] hadoop git commit: HDFS-11441. Add escaping to error message in KMS web UI. Contributed by Aaron T. Myers. [Forced Update!]

2017-03-09 Thread jhung
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 01ea2f3fb -> 1d219f00e (forced update)


HDFS-11441. Add escaping to error message in KMS web UI. Contributed by Aaron 
T. Myers.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec839b94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec839b94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec839b94

Branch: refs/heads/YARN-5734
Commit: ec839b94c0eb3f09e74f8a3b0bc9a08b3f5418b2
Parents: 209ecd1
Author: Andrew Wang 
Authored: Mon Mar 6 10:47:15 2017 -0800
Committer: Andrew Wang 
Committed: Mon Mar 6 10:47:15 2017 -0800

--
 .../hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec839b94/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
index 45e48e9..3e98a25 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.http.HtmlQuoting;
 import 
org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import 
org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
@@ -105,7 +106,7 @@ public class KMSAuthenticationFilter
 public void sendError(int sc, String msg) throws IOException {
   statusCode = sc;
   this.msg = msg;
-  super.sendError(sc, msg);
+  super.sendError(sc, HtmlQuoting.quoteHtmlChars(msg));
 }
 
 @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/33] hadoop git commit: Revert "HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled. Contributed by Steven Rand"

2017-03-09 Thread jhung
Revert "HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with 
EOFException when RPC privacy is enabled. Contributed by Steven Rand"

This reverts commit 241c1cc05b71f8b719a85c06e3df930639630726.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2be8947d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2be8947d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2be8947d

Branch: refs/heads/YARN-5734
Commit: 2be8947d12714c49ef7a90de82a351d086b435b6
Parents: 241c1cc
Author: Jian He 
Authored: Wed Mar 8 13:20:01 2017 -0800
Committer: Jian He 
Committed: Wed Mar 8 13:20:01 2017 -0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  4 +---
 .../yarn/client/api/impl/TestAMRMClient.java| 24 
 2 files changed, 1 insertion(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be8947d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index c0a5be9..70b902c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1768,9 +1768,7 @@ public class Client implements AutoCloseable {
 }
 
 void setSaslClient(SaslRpcClient client) throws IOException {
-  // Wrap the input stream in a BufferedInputStream to fill the buffer
-  // before reading its length (HADOOP-14062).
-  setInputStream(new BufferedInputStream(client.getInputStream(in)));
+  setInputStream(client.getInputStream(in));
   setOutputStream(client.getOutputStream(out));
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be8947d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index a52963a..43c0271 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -137,11 +137,6 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
-createClientAndCluster(conf);
-  }
-
-  private static void createClientAndCluster(Configuration conf)
-  throws Exception {
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -867,25 +862,6 @@ public class TestAMRMClient {
   }
 
   @Test (timeout=6)
-  public void testAMRMClientWithSaslEncryption() throws Exception {
-conf.set("hadoop.rpc.protection", "privacy");
-// we have to create a new instance of MiniYARNCluster to avoid SASL qop
-// mismatches between client and server
-tearDown();
-createClientAndCluster(conf);
-startApp();
-initAMRMClientAndTest(false);
-
-// recreate the original MiniYARNCluster and YarnClient for other tests
-conf.unset("hadoop.rpc.protection");
-tearDown();
-createClientAndCluster(conf);
-// unless we start an application the cancelApp() method will fail when
-// it runs after this test
-startApp();
-  }
-
-  @Test (timeout=6)
   public void testAMRMClientAllocReqId() throws YarnException, IOException {
 initAMRMClientAndTest(true);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/33] hadoop git commit: Treat encrypted files as private. Contributed by Daniel Templeton.

2017-03-09 Thread jhung
Treat encrypted files as private. Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f01a69f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f01a69f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f01a69f8

Branch: refs/heads/YARN-5734
Commit: f01a69f84f4cc7d925d078a7ce32e5800da4e429
Parents: 1441398
Author: Akira Ajisaka 
Authored: Tue Mar 7 13:22:11 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 13:22:11 2017 +0900

--
 .../filecache/ClientDistributedCacheManager.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f01a69f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
index 73a0330..9f8edb5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
@@ -294,10 +294,21 @@ public class ClientDistributedCacheManager {
   FsAction action, Map statCache) throws IOException {
 FileStatus status = getFileStatus(fs, path.toUri(), statCache);
 FsPermission perms = status.getPermission();
-FsAction otherAction = perms.getOtherAction();
-if (otherAction.implies(action)) {
-  return true;
+
+// Encrypted files are always treated as private. This stance has two
+// important side effects.  The first is that the encrypted files will be
+// downloaded as the job owner instead of the YARN user, which is required
+// for the KMS ACLs to work as expected.  Second, it prevent a file with
+// world readable permissions that is stored in an encryption zone from
+// being localized as a publicly shared file with world readable
+// permissions.
+if (!perms.getEncryptedBit()) {
+  FsAction otherAction = perms.getOtherAction();
+  if (otherAction.implies(action)) {
+return true;
+  }
 }
+
 return false;
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5669. Add support for docker pull command (Contributed by luhuichun)

2017-03-09 Thread sidharta
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 acf20c831 -> 3b6e5ef91


YARN-5669. Add support for docker pull command (Contributed by luhuichun)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b6e5ef9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b6e5ef9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b6e5ef9

Branch: refs/heads/branch-2
Commit: 3b6e5ef917c0afea376a82213e5072b626f1d08e
Parents: acf20c8
Author: Sidharta S 
Authored: Thu Mar 9 16:22:19 2017 -0800
Committer: Sidharta S 
Committed: Thu Mar 9 16:23:21 2017 -0800

--
 .../linux/runtime/docker/DockerPullCommand.java | 31 +
 .../runtime/docker/TestDockerPullCommand.java   | 49 
 2 files changed, 80 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b6e5ef9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
new file mode 100644
index 000..351e09e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+/**
+ * Encapsulates the docker pull command and its command
+ * line arguments.
+ */
+public class DockerPullCommand extends DockerCommand {
+  private static final String PULL_COMMAND = "pull";
+
+  public DockerPullCommand(String imageName) {
+super(PULL_COMMAND);
+super.addCommandArguments(imageName);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b6e5ef9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
new file mode 100644
index 000..89157ff
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 

hadoop git commit: YARN-5669. Add support for docker pull command (Contributed by luhuichun)

2017-03-09 Thread sidharta
Repository: hadoop
Updated Branches:
  refs/heads/trunk 822a74f2a -> e96a0b8c9


YARN-5669. Add support for docker pull command (Contributed by luhuichun)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e96a0b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e96a0b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e96a0b8c

Branch: refs/heads/trunk
Commit: e96a0b8c92b46aed7c1f5ccec13abc6c1043edba
Parents: 822a74f
Author: Sidharta S 
Authored: Thu Mar 9 16:22:19 2017 -0800
Committer: Sidharta S 
Committed: Thu Mar 9 16:22:19 2017 -0800

--
 .../linux/runtime/docker/DockerPullCommand.java | 31 +
 .../runtime/docker/TestDockerPullCommand.java   | 49 
 2 files changed, 80 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e96a0b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
new file mode 100644
index 000..351e09e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+/**
+ * Encapsulates the docker pull command and its command
+ * line arguments.
+ */
+public class DockerPullCommand extends DockerCommand {
+  private static final String PULL_COMMAND = "pull";
+
+  public DockerPullCommand(String imageName) {
+super(PULL_COMMAND);
+super.addCommandArguments(imageName);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e96a0b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
new file mode 100644
index 000..89157ff
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 

[12/20] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad99438f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
new file mode 100644
index 000..b420daa
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+
+import com.squareup.okhttp.mockwebserver.MockResponse;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Stub adl server and test acl data conversion within SDK and Hadoop adl
+ * client.
+ */
+public class TestACLFeatures extends AdlMockWebServer {
+
+  @Test(expected=AccessControlException.class)
+  public void testModifyAclEntries() throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List entries = new ArrayList();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+aclEntryBuilder.setPermission(FsAction.ALL);
+aclEntryBuilder.setScope(AclEntryScope.ACCESS);
+entries.add(aclEntryBuilder.build());
+
+aclEntryBuilder.setName("hdfs");
+aclEntryBuilder.setType(AclEntryType.GROUP);
+aclEntryBuilder.setPermission(FsAction.READ_WRITE);
+aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
+entries.add(aclEntryBuilder.build());
+
+getMockAdlFileSystem().modifyAclEntries(new Path("/test1/test2"), entries);
+
+getMockServer().enqueue(new MockResponse().setResponseCode(403)
+.setBody(TestADLResponseData.getAccessControlException()));
+
+getMockAdlFileSystem()
+.modifyAclEntries(new Path("/test1/test2"), entries);
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testRemoveAclEntriesWithOnlyUsers()
+  throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List entries = new ArrayList();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+entries.add(aclEntryBuilder.build());
+
+getMockAdlFileSystem().removeAclEntries(new Path("/test1/test2"), entries);
+
+getMockServer().enqueue(new MockResponse().setResponseCode(403)
+.setBody(TestADLResponseData.getAccessControlException()));
+
+getMockAdlFileSystem()
+.removeAclEntries(new Path("/test1/test2"), entries);
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testRemoveAclEntries() throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List entries = new ArrayList();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+aclEntryBuilder.setPermission(FsAction.ALL);
+aclEntryBuilder.setScope(AclEntryScope.ACCESS);
+entries.add(aclEntryBuilder.build());
+
+aclEntryBuilder.setName("hdfs");
+aclEntryBuilder.setType(AclEntryType.GROUP);
+aclEntryBuilder.setPermission(FsAction.READ_WRITE);
+aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
+entries.add(aclEntryBuilder.build());
+
+

[14/20] hadoop git commit: HADOOP-13900. Remove snapshot version of SDK dependency from Azure Data Lake Store File System. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13900. Remove snapshot version of SDK dependency from Azure Data Lake 
Store File System. Contributed by Vishwajeet Dusane

(cherry picked from commit ef34bf2bb92a4e8def6617b185ae72db81450de8)
(cherry picked from commit 881b63499e10b808b3c58ca34e8d4ad5a6634ed5)
(cherry picked from commit 8a51e4f89d8e57311bccac031690f7c79fc3305b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4dc0eb4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4dc0eb4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4dc0eb4a

Branch: refs/heads/branch-2.8.0
Commit: 4dc0eb4a47ccb44235d255d4a5f602db7945ad34
Parents: 181df13
Author: Mingliang Liu 
Authored: Tue Dec 13 10:34:32 2016 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:45 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 10 +-
 1 file changed, 1 insertion(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dc0eb4a/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 0a21062..186f3b9 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -35,14 +35,6 @@
 UTF-8
 true
   
-  
-
-  snapshots-repo
-  https://oss.sonatype.org/content/repositories/snapshots
-  false
-  true
-
-  
   
 
   
@@ -129,7 +121,7 @@
 
   com.microsoft.azure
   azure-data-lake-store-sdk
-  2.0.4-SNAPSHOT
+  2.0.11
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/20] hadoop git commit: HADOOP-13929. ADLS connector should not check in contract-test-options.xml. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13929. ADLS connector should not check in contract-test-options.xml. 
(John Zhuge via lei)

(cherry picked from commit 71c23c9fc94cfdf58de80effbc3f51c0925d0cfe)
(cherry picked from commit ed4388f903d34a2c09845d200e1e717397b217e1)
(cherry picked from commit 674b9cdab1e753a070dfb104f499d829f7ba4094)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b11f159
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b11f159
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b11f159

Branch: refs/heads/branch-2.8.0
Commit: 9b11f159ba302cbb4b74cb99cafcb0b724d5cb68
Parents: 9e101ca
Author: Lei Xu 
Authored: Mon Feb 13 13:33:13 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:45 2017 -0800

--
 .gitignore  | 10 ++--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  2 +-
 .../src/site/markdown/index.md  |  4 +-
 .../fs/adl/live/AdlStorageConfiguration.java| 42 +++---
 .../src/test/resources/adls.xml | 11 
 .../test/resources/contract-test-options.xml| 61 
 6 files changed, 39 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b11f159/.gitignore
--
diff --git a/.gitignore b/.gitignore
index eb1fc96..2a82ba7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,10 @@
 target
 build
 
+# Filesystem contract test options and credentials
+auth-keys.xml
+azure-auth-keys.xml
+
 # External tool builders
 */.externalToolBuilders
 */maven-eclipse.xml
@@ -22,12 +26,6 @@ build
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
-hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 yarnregistry.pdf
-hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
-hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b11f159/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3d41025..303b7bc 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -72,7 +72,7 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
-  static final String SCHEME = "adl";
+  public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
   private String userName;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b11f159/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ced5cff..5037db6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -224,7 +224,9 @@ commands demonstrate access to a storage account named 
`youraccount`.
 ## Testing the 
azure-datalake-store Module
 The hadoop-azure module includes a full suite of unit tests. Most of the tests 
will run without additional configuration by running mvn test. This includes 
tests against mocked storage, which is an in-memory emulation of Azure Data 
Lake Storage.
 
-A selection of tests can run against the Azure Data Lake Storage. To run tests 
against Adl storage. Please configure contract-test-options.xml with Adl 
account information mentioned in the above sections. Also turn on contract test 
execution flag to trigger tests against Azure Data Lake Storage.
+A selection of tests can run against the Azure Data Lake Storage. To run these
+tests, please create `src/test/resources/auth-keys.xml` with Adl account
+information mentioned in the above sections and the following properties.
 
 
   

[10/20] hadoop git commit: HADOOP-14017. User friendly name for ADLS user and group. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-14017. User friendly name for ADLS user and group. Contributed by 
Vishwajeet Dusane

(cherry picked from commit 924def78544a64449785f305cb6984c3559aea4d)
(cherry picked from commit acf20c8318270eaa8fdf6259ec7b07344e3bd7e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/518705b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/518705b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/518705b8

Branch: refs/heads/branch-2.8
Commit: 518705b8e3b6435894def73dc37104cdba38fe00
Parents: f0df481
Author: Mingliang Liu 
Authored: Tue Feb 21 13:44:42 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:33 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  4 +++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 23 +++---
 .../src/site/markdown/index.md  | 26 +++
 .../fs/adl/TestValidateConfiguration.java   |  9 ++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 33 
 5 files changed, 91 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/518705b8/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 21120df..7d31103 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -87,6 +87,10 @@ public final class AdlConfKeys {
   "adl.feature.support.acl.bit";
   static final boolean ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION_DEFAULT = true;
 
+  static final String ADL_ENABLEUPN_FOR_OWNERGROUP_KEY =
+  "adl.feature.ownerandgroup.enableupn";
+  static final boolean ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT = false;
+
   private AdlConfKeys() {
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/518705b8/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index fb0feda..e0e273e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -32,6 +32,7 @@ import com.microsoft.azure.datalake.store.DirectoryEntry;
 import com.microsoft.azure.datalake.store.DirectoryEntryType;
 import com.microsoft.azure.datalake.store.IfExists;
 import com.microsoft.azure.datalake.store.LatencyTracker;
+import com.microsoft.azure.datalake.store.UserGroupRepresentation;
 import com.microsoft.azure.datalake.store.oauth2.AccessTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
@@ -80,6 +81,8 @@ public class AdlFileSystem extends FileSystem {
   private ADLStoreClient adlClient;
   private Path workingDirectory;
   private boolean aclBitStatus;
+  private UserGroupRepresentation oidOrUpn;
+
 
   // retained for tests
   private AccessTokenProvider tokenProvider;
@@ -181,6 +184,11 @@ public class AdlFileSystem extends FileSystem {
 if (!trackLatency) {
   LatencyTracker.disable();
 }
+
+boolean enableUPN = conf.getBoolean(ADL_ENABLEUPN_FOR_OWNERGROUP_KEY,
+ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT);
+oidOrUpn = enableUPN ? UserGroupRepresentation.UPN :
+UserGroupRepresentation.OID;
   }
 
   /**
@@ -439,7 +447,8 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public FileStatus getFileStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
-DirectoryEntry entry = adlClient.getDirectoryEntry(toRelativeFilePath(f));
+DirectoryEntry entry =
+adlClient.getDirectoryEntry(toRelativeFilePath(f), oidOrUpn);
 return toFileStatus(entry, f);
   }
 
@@ -456,7 +465,7 @@ public class AdlFileSystem extends FileSystem {
   public FileStatus[] listStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
 List entries =
-adlClient.enumerateDirectory(toRelativeFilePath(f));
+adlClient.enumerateDirectory(toRelativeFilePath(f), oidOrUpn);
 return toFileStatuses(entries, f);
   }
 
@@ -749,8 +758,8 @@ 

[05/20] hadoop git commit: HADOOP-13900. Remove snapshot version of SDK dependency from Azure Data Lake Store File System. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13900. Remove snapshot version of SDK dependency from Azure Data Lake 
Store File System. Contributed by Vishwajeet Dusane

(cherry picked from commit ef34bf2bb92a4e8def6617b185ae72db81450de8)
(cherry picked from commit 881b63499e10b808b3c58ca34e8d4ad5a6634ed5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a51e4f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a51e4f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a51e4f8

Branch: refs/heads/branch-2.8
Commit: 8a51e4f89d8e57311bccac031690f7c79fc3305b
Parents: f1671e9
Author: Mingliang Liu 
Authored: Tue Dec 13 10:34:32 2016 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:31 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 10 +-
 1 file changed, 1 insertion(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a51e4f8/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index ef658a7..2944922 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -35,14 +35,6 @@
 UTF-8
 true
   
-  
-
-  snapshots-repo
-  https://oss.sonatype.org/content/repositories/snapshots
-  false
-  true
-
-  
   
 
   
@@ -129,7 +121,7 @@
 
   com.microsoft.azure
   azure-data-lake-store-sdk
-  2.0.4-SNAPSHOT
+  2.0.11
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/20] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad99438f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
new file mode 100644
index 000..8474e9c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Verify Adls CONCAT semantics compliance with Hadoop.
+ */
+public class TestAdlContractConcatLive extends AbstractContractConcatTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+return new AdlStorageContract(configuration);
+  }
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+org.junit.Assume
+.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+super.setup();
+  }
+
+  @Test
+  public void testConcatMissingTarget() throws Throwable {
+ContractTestUtils.unsupported("BUG : Adl to support expectation from "
++ "concat on missing targets.");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad99438f/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
new file mode 100644
index 000..907c50c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Verify Adls CREATE semantics compliance with Hadoop.
+ */
+public class TestAdlContractCreateLive extends AbstractContractCreateTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+return new AdlStorageContract(configuration);
+  }
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+org.junit.Assume
+.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+super.setup();
+  }
+
+  @Test
+  public void testOverwriteEmptyDirectory() throws Throwable {
+ContractTestUtils
+.unsupported("BUG : Adl to support override empty " + 

[08/20] hadoop git commit: HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via 
lei)

(cherry picked from commit e015b563197a475e354bf84fd27e7bbcc67e00a4)
(cherry picked from commit 01624f0c1b7eba9fe4b5778e1da9d38545af3722)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5eaec560
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5eaec560
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5eaec560

Branch: refs/heads/branch-2.8
Commit: 5eaec560d98fe127b5506c50b4c079d14b28b5ef
Parents: 8a51e4f
Author: Lei Xu 
Authored: Fri Jan 20 14:34:02 2017 +0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:32 2017 -0800

--
 .../src/site/markdown/CredentialProviderAPI.md  |   1 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  37 -
 .../src/site/markdown/index.md  |  44 ++
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 152 +++
 4 files changed, 226 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eaec560/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
index 209b48d..de871b1 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
@@ -102,6 +102,7 @@ In summary, first, provision the credentials into a 
provider then configure the
 |YARN |WebAppUtils uptakes the use of the credential provider 
API through the new method on Configuration called getPassword. This provides 
an alternative to storing the passwords in clear text within the ssl-server.xml 
file while maintaining backward compatibility.|TODO|
 |AWS  S3/S3A |Uses Configuration.getPassword to get the S3 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[AWS S3/S3A 
Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
 |Azure  WASB |Uses Configuration.getPassword to get the WASB 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[Azure WASB 
Usage](../../hadoop-azure/index.html)|
+|Azure  ADLS |Uses Configuration.getPassword to get the ADLS 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[Azure ADLS 
Usage](../../hadoop-azure-datalake/index.html)|
 |Apache  Accumulo|The trace.password property is used by the Tracer to 
authenticate with Accumulo and persist the traces in the trace table. The 
credential provider API is used to acquire the trace.password from a provider 
or from configuration for backward compatibility.|TODO|
 |Apache  Slider  |A capability has been added to Slider to prompt the 
user for needed passwords and store them using CredentialProvider so they can 
be retrieved by an app later.|TODO|
 |Apache  Hive|Protection of the metastore password, SSL related 
passwords and JDO string password has been added through the use of the 
Credential Provider API|TODO|

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eaec560/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index bd43c52..3d41025 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -58,10 +58,12 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.VersionInfo;
+
 import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 
 /**
@@ -224,8 +226,10 @@ public class AdlFileSystem extends FileSystem {
 return azureTokenProvider;
   }
 
-  private AccessTokenProvider getAccessTokenProvider(Configuration conf)
+  private AccessTokenProvider 

[16/20] hadoop git commit: HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet 
Dusane

(cherry picked from commit 4113ec5fa5ca049ebaba039b1faf3911c6a34f7b)
(cherry picked from commit 7fd0556b2bcc4eb18c5301a580646a2d7d502b50)
(cherry picked from commit f1671e951931f14c84616bd4072bb20411e258b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/181df134
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/181df134
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/181df134

Branch: refs/heads/branch-2.8.0
Commit: 181df134013f765400575b9f871be681eee639de
Parents: ad99438
Author: Mingliang Liu 
Authored: Fri Dec 2 15:54:57 2016 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:45 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  24 +-
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   |   6 +-
 .../apache/hadoop/fs/adl/TestListStatus.java|   6 +-
 .../fs/adl/live/TestAdlContractAppendLive.java  |  11 +-
 .../fs/adl/live/TestAdlContractConcatLive.java  |  23 +-
 .../fs/adl/live/TestAdlContractCreateLive.java  |  19 +-
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  11 +-
 .../live/TestAdlContractGetFileStatusLive.java  |  36 ++
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  25 +-
 .../fs/adl/live/TestAdlContractOpenLive.java|  11 +-
 .../fs/adl/live/TestAdlContractRenameLive.java  |  30 +-
 .../fs/adl/live/TestAdlContractRootDirLive.java |  19 +-
 .../fs/adl/live/TestAdlContractSeekLive.java|  11 +-
 .../live/TestAdlDifferentSizeWritesLive.java|  69 ++--
 .../live/TestAdlFileContextCreateMkdirLive.java |  67 
 .../TestAdlFileContextMainOperationsLive.java   |  99 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  38 +--
 .../live/TestAdlInternalCreateNonRecursive.java | 134 
 .../fs/adl/live/TestAdlPermissionLive.java  | 116 +++
 .../adl/live/TestAdlSupportedCharsetInPath.java | 336 +++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 111 ++
 21 files changed, 996 insertions(+), 206 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/181df134/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9083afc..bd43c52 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -346,7 +346,6 @@ public class AdlFileSystem extends FileSystem {
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
-  @Deprecated
   @Override
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
   EnumSet flags, int bufferSize, short replication,
@@ -471,6 +470,10 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
 statistics.incrementWriteOps(1);
+if (toRelativeFilePath(src).equals("/")) {
+  return false;
+}
+
 return adlClient.rename(toRelativeFilePath(src), toRelativeFilePath(dst));
   }
 
@@ -522,9 +525,24 @@ public class AdlFileSystem extends FileSystem {
   public boolean delete(final Path path, final boolean recursive)
   throws IOException {
 statistics.incrementWriteOps(1);
+String relativePath = toRelativeFilePath(path);
+// Delete on root directory not supported.
+if (relativePath.equals("/")) {
+  // This is important check after recent commit
+  // HADOOP-12977 and HADOOP-13716 validates on root for
+  // 1. if root is empty and non recursive delete then return false.
+  // 2. if root is non empty and non recursive delete then throw exception.
+  if (!recursive
+  && adlClient.enumerateDirectory(toRelativeFilePath(path), 1).size()
+  > 0) {
+throw new IOException("Delete on root is not supported.");
+  }
+  return false;
+}
+
 return recursive ?
-adlClient.deleteRecursive(toRelativeFilePath(path)) :
-adlClient.delete(toRelativeFilePath(path));
+adlClient.deleteRecursive(relativePath) :
+adlClient.delete(relativePath);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/181df134/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
--
diff --git 

[13/20] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. 
Contributed by Vishwajeet Dusane

(cherry picked from commit edf149b9790a96563fe7bba289a040542c8ab8f2)
(cherry picked from commit b4b4ca9199fbdcd172be995c5b9b1ff3c468da89)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad99438f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad99438f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad99438f

Branch: refs/heads/branch-2.8.0
Commit: ad99438f5c0945b86524d00e05e5123471f78c83
Parents: 66e062e
Author: Chris Douglas 
Authored: Wed Mar 8 23:18:28 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:35 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml  | 168 
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |  56 ++
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  92 ++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 923 +++
 .../apache/hadoop/fs/adl/AdlFsInputStream.java  | 149 +++
 .../apache/hadoop/fs/adl/AdlFsOutputStream.java |  82 ++
 .../org/apache/hadoop/fs/adl/AdlPermission.java |  69 ++
 .../hadoop/fs/adl/SdkTokenProviderAdapter.java  |  41 +
 .../apache/hadoop/fs/adl/TokenProviderType.java |  25 +
 .../fs/adl/oauth2/AzureADTokenProvider.java |  70 ++
 .../hadoop/fs/adl/oauth2/package-info.java  |  23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |  23 +
 .../META-INF/org.apache.hadoop.fs.FileSystem|  16 +
 .../src/site/markdown/index.md  | 193 
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  |  99 ++
 .../apache/hadoop/fs/adl/TestACLFeatures.java   | 262 ++
 .../hadoop/fs/adl/TestADLResponseData.java  | 147 +++
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   | 196 
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 133 +++
 .../adl/TestConcurrentDataReadOperations.java   | 299 ++
 .../hadoop/fs/adl/TestCustomTokenProvider.java  | 136 +++
 .../apache/hadoop/fs/adl/TestGetFileStatus.java |  70 ++
 .../apache/hadoop/fs/adl/TestListStatus.java| 103 +++
 .../fs/adl/TestRelativePathFormation.java   |  61 ++
 .../fs/adl/TestValidateConfiguration.java   | 103 +++
 .../hadoop/fs/adl/TestableAdlFileSystem.java|  30 +
 .../fs/adl/common/CustomMockTokenProvider.java  |  61 ++
 .../hadoop/fs/adl/common/ExpectedResponse.java  |  71 ++
 .../hadoop/fs/adl/common/Parallelized.java  |  60 ++
 .../hadoop/fs/adl/common/TestDataForRead.java   | 122 +++
 .../fs/adl/live/AdlStorageConfiguration.java|  94 ++
 .../hadoop/fs/adl/live/AdlStorageContract.java  |  66 ++
 .../fs/adl/live/TestAdlContractAppendLive.java  |  53 ++
 .../fs/adl/live/TestAdlContractConcatLive.java  |  52 ++
 .../fs/adl/live/TestAdlContractCreateLive.java  |  52 ++
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  44 +
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  55 ++
 .../fs/adl/live/TestAdlContractOpenLive.java|  44 +
 .../fs/adl/live/TestAdlContractRenameLive.java  |  63 ++
 .../fs/adl/live/TestAdlContractRootDirLive.java |  52 ++
 .../fs/adl/live/TestAdlContractSeekLive.java|  44 +
 .../live/TestAdlDifferentSizeWritesLive.java| 102 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  94 ++
 .../src/test/resources/adls.xml | 140 +++
 .../test/resources/contract-test-options.xml|  61 ++
 .../src/test/resources/log4j.properties |  30 +
 hadoop-tools/pom.xml|   1 +
 47 files changed, 4930 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad99438f/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
new file mode 100644
index 000..0a21062
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -0,0 +1,168 @@
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+2.8.0
+../../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-azure-datalake
+  Apache Hadoop Azure Data Lake support
+  
+This module contains code to support integration with Azure Data Lake.
+  
+  jar
+  
+2.4.0
+0.9.1
+UTF-8
+true
+  
+  
+
+  snapshots-repo
+  https://oss.sonatype.org/content/repositories/snapshots
+  false
+  true
+
+  
+  
+
+  
+org.apache.maven.plugins
+maven-project-info-reports-plugin
+
+
+  false
+  false
+  
+
+  
+  
+

[15/20] hadoop git commit: HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via 
lei)

(cherry picked from commit e015b563197a475e354bf84fd27e7bbcc67e00a4)
(cherry picked from commit 01624f0c1b7eba9fe4b5778e1da9d38545af3722)
(cherry picked from commit 5eaec560d98fe127b5506c50b4c079d14b28b5ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/968bbac0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/968bbac0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/968bbac0

Branch: refs/heads/branch-2.8.0
Commit: 968bbac072f6f43356f0e5b3e548bb9bccbf5af3
Parents: 4dc0eb4
Author: Lei Xu 
Authored: Fri Jan 20 14:34:02 2017 +0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:45 2017 -0800

--
 .../src/site/markdown/CredentialProviderAPI.md  |   1 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  37 -
 .../src/site/markdown/index.md  |  44 ++
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 152 +++
 4 files changed, 226 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/968bbac0/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
index 209b48d..de871b1 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
@@ -102,6 +102,7 @@ In summary, first, provision the credentials into a 
provider then configure the
 |YARN |WebAppUtils uptakes the use of the credential provider 
API through the new method on Configuration called getPassword. This provides 
an alternative to storing the passwords in clear text within the ssl-server.xml 
file while maintaining backward compatibility.|TODO|
 |AWS  S3/S3A |Uses Configuration.getPassword to get the S3 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[AWS S3/S3A 
Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
 |Azure  WASB |Uses Configuration.getPassword to get the WASB 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[Azure WASB 
Usage](../../hadoop-azure/index.html)|
+|Azure  ADLS |Uses Configuration.getPassword to get the ADLS 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[Azure ADLS 
Usage](../../hadoop-azure-datalake/index.html)|
 |Apache  Accumulo|The trace.password property is used by the Tracer to 
authenticate with Accumulo and persist the traces in the trace table. The 
credential provider API is used to acquire the trace.password from a provider 
or from configuration for backward compatibility.|TODO|
 |Apache  Slider  |A capability has been added to Slider to prompt the 
user for needed passwords and store them using CredentialProvider so they can 
be retrieved by an app later.|TODO|
 |Apache  Hive|Protection of the metastore password, SSL related 
passwords and JDO string password has been added through the use of the 
Credential Provider API|TODO|

http://git-wip-us.apache.org/repos/asf/hadoop/blob/968bbac0/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index bd43c52..3d41025 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -58,10 +58,12 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.VersionInfo;
+
 import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 
 /**
@@ -224,8 +226,10 @@ public class AdlFileSystem extends FileSystem {
 return azureTokenProvider;
   }
 
-  private AccessTokenProvider 

[20/20] hadoop git commit: HADOOP-14017. User friendly name for ADLS user and group. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-14017. User friendly name for ADLS user and group. Contributed by 
Vishwajeet Dusane

(cherry picked from commit 924def78544a64449785f305cb6984c3559aea4d)
(cherry picked from commit acf20c8318270eaa8fdf6259ec7b07344e3bd7e5)
(cherry picked from commit 518705b8e3b6435894def73dc37104cdba38fe00)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1aa110fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1aa110fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1aa110fc

Branch: refs/heads/branch-2.8.0
Commit: 1aa110fc7a72d77d5ed99a917ec55c196ee63a22
Parents: 726c7c7
Author: Mingliang Liu 
Authored: Tue Feb 21 13:44:42 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:46 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  4 +++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 23 +++---
 .../src/site/markdown/index.md  | 26 +++
 .../fs/adl/TestValidateConfiguration.java   |  9 ++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 33 
 5 files changed, 91 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aa110fc/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 21120df..7d31103 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -87,6 +87,10 @@ public final class AdlConfKeys {
   "adl.feature.support.acl.bit";
   static final boolean ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION_DEFAULT = true;
 
+  static final String ADL_ENABLEUPN_FOR_OWNERGROUP_KEY =
+  "adl.feature.ownerandgroup.enableupn";
+  static final boolean ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT = false;
+
   private AdlConfKeys() {
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1aa110fc/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index fb0feda..e0e273e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -32,6 +32,7 @@ import com.microsoft.azure.datalake.store.DirectoryEntry;
 import com.microsoft.azure.datalake.store.DirectoryEntryType;
 import com.microsoft.azure.datalake.store.IfExists;
 import com.microsoft.azure.datalake.store.LatencyTracker;
+import com.microsoft.azure.datalake.store.UserGroupRepresentation;
 import com.microsoft.azure.datalake.store.oauth2.AccessTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
@@ -80,6 +81,8 @@ public class AdlFileSystem extends FileSystem {
   private ADLStoreClient adlClient;
   private Path workingDirectory;
   private boolean aclBitStatus;
+  private UserGroupRepresentation oidOrUpn;
+
 
   // retained for tests
   private AccessTokenProvider tokenProvider;
@@ -181,6 +184,11 @@ public class AdlFileSystem extends FileSystem {
 if (!trackLatency) {
   LatencyTracker.disable();
 }
+
+boolean enableUPN = conf.getBoolean(ADL_ENABLEUPN_FOR_OWNERGROUP_KEY,
+ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT);
+oidOrUpn = enableUPN ? UserGroupRepresentation.UPN :
+UserGroupRepresentation.OID;
   }
 
   /**
@@ -439,7 +447,8 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public FileStatus getFileStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
-DirectoryEntry entry = adlClient.getDirectoryEntry(toRelativeFilePath(f));
+DirectoryEntry entry =
+adlClient.getDirectoryEntry(toRelativeFilePath(f), oidOrUpn);
 return toFileStatus(entry, f);
   }
 
@@ -456,7 +465,7 @@ public class AdlFileSystem extends FileSystem {
   public FileStatus[] listStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
 List entries =
-adlClient.enumerateDirectory(toRelativeFilePath(f));
+adlClient.enumerateDirectory(toRelativeFilePath(f), oidOrUpn);
  

[06/20] hadoop git commit: HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure datalake account. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure 
datalake account. Contributed by Vishwajeet Dusane

(cherry picked from commit f4329990250bed62efdebe3ce2bc740092cf9573)
(cherry picked from commit a14686680228d13386ccde7272e3b5bdabe7c792)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0df481c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0df481c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0df481c

Branch: refs/heads/branch-2.8
Commit: f0df481c76fbff4c8f362db89a060ee736d3f411
Parents: 674b9cd
Author: Mingliang Liu 
Authored: Thu Feb 16 15:14:25 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:32 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  6 +++-
 .../hadoop/fs/adl/TestADLResponseData.java  | 21 +
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 25 +++
 .../apache/hadoop/fs/adl/TestListStatus.java| 32 
 4 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0df481c/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 303b7bc..fb0feda 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -593,7 +593,11 @@ public class AdlFileSystem extends FileSystem {
 boolean isDirectory = entry.type == DirectoryEntryType.DIRECTORY;
 long lastModificationData = entry.lastModifiedTime.getTime();
 long lastAccessTime = entry.lastAccessTime.getTime();
-FsPermission permission = new AdlPermission(aclBitStatus,
+// set aclBit from ADLS backend response if
+// ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION is true.
+final boolean aclBit = aclBitStatus ? entry.aclBit : false;
+
+FsPermission permission = new AdlPermission(aclBit,
 Short.valueOf(entry.permission, 8));
 String user = entry.user;
 String group = entry.group;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0df481c/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
index 24eb314..788242e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
@@ -66,6 +66,15 @@ public final class TestADLResponseData {
 "\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\"}}";
   }
 
+  public static String getGetFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatus\":{\"length\":1024," +
+"\"pathSuffix\":\"\",\"type\":\"FILE\",\"blockSize\":268435456," +
+"\"accessTime\":1452103827023,\"modificationTime\":1452103827023," +
+"\"replication\":0,\"permission\":\"777\"," +
+"\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\",\"aclBit\":\""
++ aclBit + "\"}}";
+  }
+
   public static String getListFileStatusJSONResponse(int dirSize) {
 String list = "";
 for (int i = 0; i < dirSize; ++i) {
@@ -81,6 +90,18 @@ public final class TestADLResponseData {
 return "{\"FileStatuses\":{\"FileStatus\":[" + list + "]}}";
   }
 
+  public static String getListFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatuses\":{\"FileStatus\":[{\"length\":0,\"pathSuffix\":\""
++ java.util.UUID.randomUUID()
++ "\",\"type\":\"DIRECTORY\",\"blockSize\":0,"
++ "\"accessTime\":1481184513488,"
++ "\"modificationTime\":1481184513488,\"replication\":0,"
++ "\"permission\":\"770\","
++ "\"owner\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\","
++ "\"group\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\",\"aclBit\":\""
++ aclBit + "\"}]}}";
+  }
+
   public static String getJSONResponse(boolean status) {
 return "{\"boolean\":" + status + "}";
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0df481c/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java

[02/20] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b4ca91/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
new file mode 100644
index 000..b420daa
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+
+import com.squareup.okhttp.mockwebserver.MockResponse;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Stub adl server and test acl data conversion within SDK and Hadoop adl
+ * client.
+ */
+public class TestACLFeatures extends AdlMockWebServer {
+
+  @Test(expected=AccessControlException.class)
+  public void testModifyAclEntries() throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List<AclEntry> entries = new ArrayList<AclEntry>();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+aclEntryBuilder.setPermission(FsAction.ALL);
+aclEntryBuilder.setScope(AclEntryScope.ACCESS);
+entries.add(aclEntryBuilder.build());
+
+aclEntryBuilder.setName("hdfs");
+aclEntryBuilder.setType(AclEntryType.GROUP);
+aclEntryBuilder.setPermission(FsAction.READ_WRITE);
+aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
+entries.add(aclEntryBuilder.build());
+
+getMockAdlFileSystem().modifyAclEntries(new Path("/test1/test2"), entries);
+
+getMockServer().enqueue(new MockResponse().setResponseCode(403)
+.setBody(TestADLResponseData.getAccessControlException()));
+
+getMockAdlFileSystem()
+.modifyAclEntries(new Path("/test1/test2"), entries);
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testRemoveAclEntriesWithOnlyUsers()
+  throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List<AclEntry> entries = new ArrayList<AclEntry>();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+entries.add(aclEntryBuilder.build());
+
+getMockAdlFileSystem().removeAclEntries(new Path("/test1/test2"), entries);
+
+getMockServer().enqueue(new MockResponse().setResponseCode(403)
+.setBody(TestADLResponseData.getAccessControlException()));
+
+getMockAdlFileSystem()
+.removeAclEntries(new Path("/test1/test2"), entries);
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testRemoveAclEntries() throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List<AclEntry> entries = new ArrayList<AclEntry>();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+aclEntryBuilder.setPermission(FsAction.ALL);
+aclEntryBuilder.setScope(AclEntryScope.ACCESS);
+entries.add(aclEntryBuilder.build());
+
+aclEntryBuilder.setName("hdfs");
+aclEntryBuilder.setType(AclEntryType.GROUP);
+aclEntryBuilder.setPermission(FsAction.READ_WRITE);
+aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
+entries.add(aclEntryBuilder.build());
+
+

[01/20] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ffc00c57d -> 518705b8e
  refs/heads/branch-2.8.0 66e062e53 -> 1aa110fc7


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b4ca91/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
new file mode 100644
index 000..8474e9c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Verify Adls CONCAT semantics compliance with Hadoop.
+ */
+public class TestAdlContractConcatLive extends AbstractContractConcatTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+return new AdlStorageContract(configuration);
+  }
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+org.junit.Assume
+.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+super.setup();
+  }
+
+  @Test
+  public void testConcatMissingTarget() throws Throwable {
+ContractTestUtils.unsupported("BUG : Adl to support expectation from "
++ "concat on missing targets.");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b4ca91/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
new file mode 100644
index 000..907c50c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Verify Adls CREATE semantics compliance with Hadoop.
+ */
+public class TestAdlContractCreateLive extends AbstractContractCreateTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+return new AdlStorageContract(configuration);
+  }
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+org.junit.Assume
+.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+super.setup();
+  }
+
+  @Test
+  public void 

[03/20] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. 
Contributed by Vishwajeet Dusane

(cherry picked from commit edf149b9790a96563fe7bba289a040542c8ab8f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4b4ca91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4b4ca91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4b4ca91

Branch: refs/heads/branch-2.8
Commit: b4b4ca9199fbdcd172be995c5b9b1ff3c468da89
Parents: ffc00c5
Author: Chris Douglas 
Authored: Wed Mar 8 23:18:28 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:21 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml  | 168 
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |  56 ++
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  92 ++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 923 +++
 .../apache/hadoop/fs/adl/AdlFsInputStream.java  | 149 +++
 .../apache/hadoop/fs/adl/AdlFsOutputStream.java |  82 ++
 .../org/apache/hadoop/fs/adl/AdlPermission.java |  69 ++
 .../hadoop/fs/adl/SdkTokenProviderAdapter.java  |  41 +
 .../apache/hadoop/fs/adl/TokenProviderType.java |  25 +
 .../fs/adl/oauth2/AzureADTokenProvider.java |  70 ++
 .../hadoop/fs/adl/oauth2/package-info.java  |  23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |  23 +
 .../META-INF/org.apache.hadoop.fs.FileSystem|  16 +
 .../src/site/markdown/index.md  | 193 
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  |  99 ++
 .../apache/hadoop/fs/adl/TestACLFeatures.java   | 262 ++
 .../hadoop/fs/adl/TestADLResponseData.java  | 147 +++
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   | 196 
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 133 +++
 .../adl/TestConcurrentDataReadOperations.java   | 299 ++
 .../hadoop/fs/adl/TestCustomTokenProvider.java  | 136 +++
 .../apache/hadoop/fs/adl/TestGetFileStatus.java |  70 ++
 .../apache/hadoop/fs/adl/TestListStatus.java| 103 +++
 .../fs/adl/TestRelativePathFormation.java   |  61 ++
 .../fs/adl/TestValidateConfiguration.java   | 103 +++
 .../hadoop/fs/adl/TestableAdlFileSystem.java|  30 +
 .../fs/adl/common/CustomMockTokenProvider.java  |  61 ++
 .../hadoop/fs/adl/common/ExpectedResponse.java  |  71 ++
 .../hadoop/fs/adl/common/Parallelized.java  |  60 ++
 .../hadoop/fs/adl/common/TestDataForRead.java   | 122 +++
 .../fs/adl/live/AdlStorageConfiguration.java|  94 ++
 .../hadoop/fs/adl/live/AdlStorageContract.java  |  66 ++
 .../fs/adl/live/TestAdlContractAppendLive.java  |  53 ++
 .../fs/adl/live/TestAdlContractConcatLive.java  |  52 ++
 .../fs/adl/live/TestAdlContractCreateLive.java  |  52 ++
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  44 +
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  55 ++
 .../fs/adl/live/TestAdlContractOpenLive.java|  44 +
 .../fs/adl/live/TestAdlContractRenameLive.java  |  63 ++
 .../fs/adl/live/TestAdlContractRootDirLive.java |  52 ++
 .../fs/adl/live/TestAdlContractSeekLive.java|  44 +
 .../live/TestAdlDifferentSizeWritesLive.java| 102 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  94 ++
 .../src/test/resources/adls.xml | 140 +++
 .../test/resources/contract-test-options.xml|  61 ++
 .../src/test/resources/log4j.properties |  30 +
 hadoop-tools/pom.xml|   1 +
 47 files changed, 4930 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b4ca91/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
new file mode 100644
index 000..ef658a7
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -0,0 +1,168 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+2.8.1-SNAPSHOT
+../../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-azure-datalake
+  Apache Hadoop Azure Data Lake support
+  
+This module contains code to support integration with Azure Data Lake.
+  
+  jar
+  
+2.4.0
+0.9.1
+UTF-8
+true
+  
+  
+
+  snapshots-repo
+  https://oss.sonatype.org/content/repositories/snapshots
+  false
+  true
+
+  
+  
+
+  
+org.apache.maven.plugins
+maven-project-info-reports-plugin
+
+
+  false
+  false
+  
+
+  
+  
+org.apache.maven.plugins
+maven-jar-plugin
+
+  
+   

[04/20] hadoop git commit: HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet 
Dusane

(cherry picked from commit 4113ec5fa5ca049ebaba039b1faf3911c6a34f7b)
(cherry picked from commit 7fd0556b2bcc4eb18c5301a580646a2d7d502b50)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1671e95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1671e95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1671e95

Branch: refs/heads/branch-2.8
Commit: f1671e951931f14c84616bd4072bb20411e258b0
Parents: b4b4ca9
Author: Mingliang Liu 
Authored: Fri Dec 2 15:54:57 2016 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:31 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  24 +-
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   |   6 +-
 .../apache/hadoop/fs/adl/TestListStatus.java|   6 +-
 .../fs/adl/live/TestAdlContractAppendLive.java  |  11 +-
 .../fs/adl/live/TestAdlContractConcatLive.java  |  23 +-
 .../fs/adl/live/TestAdlContractCreateLive.java  |  19 +-
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  11 +-
 .../live/TestAdlContractGetFileStatusLive.java  |  36 ++
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  25 +-
 .../fs/adl/live/TestAdlContractOpenLive.java|  11 +-
 .../fs/adl/live/TestAdlContractRenameLive.java  |  30 +-
 .../fs/adl/live/TestAdlContractRootDirLive.java |  19 +-
 .../fs/adl/live/TestAdlContractSeekLive.java|  11 +-
 .../live/TestAdlDifferentSizeWritesLive.java|  69 ++--
 .../live/TestAdlFileContextCreateMkdirLive.java |  67 
 .../TestAdlFileContextMainOperationsLive.java   |  99 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  38 +--
 .../live/TestAdlInternalCreateNonRecursive.java | 134 
 .../fs/adl/live/TestAdlPermissionLive.java  | 116 +++
 .../adl/live/TestAdlSupportedCharsetInPath.java | 336 +++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 111 ++
 21 files changed, 996 insertions(+), 206 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1671e95/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9083afc..bd43c52 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -346,7 +346,6 @@ public class AdlFileSystem extends FileSystem {
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
-  @Deprecated
   @Override
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication,
@@ -471,6 +470,10 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
 statistics.incrementWriteOps(1);
+if (toRelativeFilePath(src).equals("/")) {
+  return false;
+}
+
 return adlClient.rename(toRelativeFilePath(src), toRelativeFilePath(dst));
   }
 
@@ -522,9 +525,24 @@ public class AdlFileSystem extends FileSystem {
   public boolean delete(final Path path, final boolean recursive)
   throws IOException {
 statistics.incrementWriteOps(1);
+String relativePath = toRelativeFilePath(path);
+// Delete on root directory not supported.
+if (relativePath.equals("/")) {
+  // This is important check after recent commit
+  // HADOOP-12977 and HADOOP-13716 validates on root for
+  // 1. if root is empty and non recursive delete then return false.
+  // 2. if root is non empty and non recursive delete then throw exception.
+  if (!recursive
+  && adlClient.enumerateDirectory(toRelativeFilePath(path), 1).size()
+  > 0) {
+throw new IOException("Delete on root is not supported.");
+  }
+  return false;
+}
+
 return recursive ?
-adlClient.deleteRecursive(toRelativeFilePath(path)) :
-adlClient.delete(toRelativeFilePath(path));
+adlClient.deleteRecursive(relativePath) :
+adlClient.delete(relativePath);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1671e95/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
 

[19/20] hadoop git commit: HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure datalake account. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure 
datalake account. Contributed by Vishwajeet Dusane

(cherry picked from commit f4329990250bed62efdebe3ce2bc740092cf9573)
(cherry picked from commit a14686680228d13386ccde7272e3b5bdabe7c792)
(cherry picked from commit f0df481c76fbff4c8f362db89a060ee736d3f411)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/726c7c72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/726c7c72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/726c7c72

Branch: refs/heads/branch-2.8.0
Commit: 726c7c725f9c2c69d8fccb2d6dcae1982167a752
Parents: 9b11f15
Author: Mingliang Liu 
Authored: Thu Feb 16 15:14:25 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:46 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  6 +++-
 .../hadoop/fs/adl/TestADLResponseData.java  | 21 +
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 25 +++
 .../apache/hadoop/fs/adl/TestListStatus.java| 32 
 4 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/726c7c72/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 303b7bc..fb0feda 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -593,7 +593,11 @@ public class AdlFileSystem extends FileSystem {
 boolean isDirectory = entry.type == DirectoryEntryType.DIRECTORY;
 long lastModificationData = entry.lastModifiedTime.getTime();
 long lastAccessTime = entry.lastAccessTime.getTime();
-FsPermission permission = new AdlPermission(aclBitStatus,
+// set aclBit from ADLS backend response if
+// ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION is true.
+final boolean aclBit = aclBitStatus ? entry.aclBit : false;
+
+FsPermission permission = new AdlPermission(aclBit,
 Short.valueOf(entry.permission, 8));
 String user = entry.user;
 String group = entry.group;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/726c7c72/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
index 24eb314..788242e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
@@ -66,6 +66,15 @@ public final class TestADLResponseData {
 "\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\"}}";
   }
 
+  public static String getGetFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatus\":{\"length\":1024," +
+"\"pathSuffix\":\"\",\"type\":\"FILE\",\"blockSize\":268435456," +
+"\"accessTime\":1452103827023,\"modificationTime\":1452103827023," +
+"\"replication\":0,\"permission\":\"777\"," +
+"\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\",\"aclBit\":\""
++ aclBit + "\"}}";
+  }
+
   public static String getListFileStatusJSONResponse(int dirSize) {
 String list = "";
 for (int i = 0; i < dirSize; ++i) {
@@ -81,6 +90,18 @@ public final class TestADLResponseData {
 return "{\"FileStatuses\":{\"FileStatus\":[" + list + "]}}";
   }
 
+  public static String getListFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatuses\":{\"FileStatus\":[{\"length\":0,\"pathSuffix\":\""
++ java.util.UUID.randomUUID()
++ "\",\"type\":\"DIRECTORY\",\"blockSize\":0,"
++ "\"accessTime\":1481184513488,"
++ "\"modificationTime\":1481184513488,\"replication\":0,"
++ "\"permission\":\"770\","
++ "\"owner\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\","
++ "\"group\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\",\"aclBit\":\""
++ aclBit + "\"}]}}";
+  }
+
   public static String getJSONResponse(boolean status) {
 return "{\"boolean\":" + status + "}";
   }


[07/20] hadoop git commit: HADOOP-13929. ADLS connector should not check in contract-test-options.xml. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13929. ADLS connector should not check in contract-test-options.xml. 
(John Zhuge via lei)

(cherry picked from commit 71c23c9fc94cfdf58de80effbc3f51c0925d0cfe)
(cherry picked from commit ed4388f903d34a2c09845d200e1e717397b217e1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/674b9cda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/674b9cda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/674b9cda

Branch: refs/heads/branch-2.8
Commit: 674b9cdab1e753a070dfb104f499d829f7ba4094
Parents: 87abe01
Author: Lei Xu 
Authored: Mon Feb 13 13:33:13 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:32 2017 -0800

--
 .gitignore  | 10 ++--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  2 +-
 .../src/site/markdown/index.md  |  4 +-
 .../fs/adl/live/AdlStorageConfiguration.java| 42 +++---
 .../src/test/resources/adls.xml | 11 
 .../test/resources/contract-test-options.xml| 61 
 6 files changed, 39 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/674b9cda/.gitignore
--
diff --git a/.gitignore b/.gitignore
index eb1fc96..2a82ba7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,10 @@
 target
 build
 
+# Filesystem contract test options and credentials
+auth-keys.xml
+azure-auth-keys.xml
+
 # External tool builders
 */.externalToolBuilders
 */maven-eclipse.xml
@@ -22,12 +26,6 @@ build
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
-hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 yarnregistry.pdf
-hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
-hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/674b9cda/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3d41025..303b7bc 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -72,7 +72,7 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
-  static final String SCHEME = "adl";
+  public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
   private String userName;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/674b9cda/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ced5cff..5037db6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -224,7 +224,9 @@ commands demonstrate access to a storage account named 
`youraccount`.
 ## Testing the 
azure-datalake-store Module
 The hadoop-azure module includes a full suite of unit tests. Most of the tests 
will run without additional configuration by running mvn test. This includes 
tests against mocked storage, which is an in-memory emulation of Azure Data 
Lake Storage.
 
-A selection of tests can run against the Azure Data Lake Storage. To run tests 
against Adl storage. Please configure contract-test-options.xml with Adl 
account information mentioned in the above sections. Also turn on contract test 
execution flag to trigger tests against Azure Data Lake Storage.
+A selection of tests can run against the Azure Data Lake Storage. To run these
+tests, please create `src/test/resources/auth-keys.xml` with Adl account
+information mentioned in the above sections and the following properties.
 
 
 dfs.adl.test.contract.enable


[09/20] hadoop git commit: HADOOP-13962. Update ADLS SDK to 2.1.4. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13962. Update ADLS SDK to 2.1.4. (John Zhuge via lei)

(cherry picked from commit ccf2d662443fc169835d66b715441529658b245c)
(cherry picked from commit d3a6124ffaa76f0aeaad1a3c58e005a65f507a10)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87abe01a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87abe01a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87abe01a

Branch: refs/heads/branch-2.8
Commit: 87abe01ab1edf0bfe3e9ac10a78cbb1c3ccf1a12
Parents: 5eaec56
Author: Lei Xu 
Authored: Sat Jan 21 10:52:02 2017 +0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:21:32 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87abe01a/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 2944922..267eb6c 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -121,7 +121,7 @@
 
   <groupId>com.microsoft.azure</groupId>
   <artifactId>azure-data-lake-store-sdk</artifactId>
-  <version>2.0.11</version>
+  <version>2.1.4</version>
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/20] hadoop git commit: HADOOP-13962. Update ADLS SDK to 2.1.4. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13962. Update ADLS SDK to 2.1.4. (John Zhuge via lei)

(cherry picked from commit ccf2d662443fc169835d66b715441529658b245c)
(cherry picked from commit d3a6124ffaa76f0aeaad1a3c58e005a65f507a10)
(cherry picked from commit 87abe01ab1edf0bfe3e9ac10a78cbb1c3ccf1a12)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e101ca2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e101ca2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e101ca2

Branch: refs/heads/branch-2.8.0
Commit: 9e101ca26de9d1031c959f6ce54eee8b642d7010
Parents: 968bbac
Author: Lei Xu 
Authored: Sat Jan 21 10:52:02 2017 +0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:24:45 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e101ca2/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 186f3b9..a8c3b16 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -121,7 +121,7 @@
 
   <groupId>com.microsoft.azure</groupId>
   <artifactId>azure-data-lake-store-sdk</artifactId>
-  <version>2.0.11</version>
+  <version>2.1.4</version>
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/10] hadoop git commit: HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13956. Read ADLS credentials from Credential Provider. (John Zhuge via 
lei)

(cherry picked from commit e015b563197a475e354bf84fd27e7bbcc67e00a4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01624f0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01624f0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01624f0c

Branch: refs/heads/branch-2
Commit: 01624f0c1b7eba9fe4b5778e1da9d38545af3722
Parents: 881b634
Author: Lei Xu 
Authored: Fri Jan 20 14:34:02 2017 +0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:16:28 2017 -0800

--
 .../src/site/markdown/CredentialProviderAPI.md  |   1 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  37 -
 .../src/site/markdown/index.md  |  44 ++
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 152 +++
 4 files changed, 226 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01624f0c/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
index a40bf2b..30dfdd8 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
@@ -102,6 +102,7 @@ In summary, first, provision the credentials into a 
provider then configure the
 |YARN |WebAppUtils uptakes the use of the credential provider 
API through the new method on Configuration called getPassword. This provides 
an alternative to storing the passwords in clear text within the ssl-server.xml 
file while maintaining backward compatibility.|TODO|
 |AWS  S3/S3A |Uses Configuration.getPassword to get the S3 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[AWS S3/S3A 
Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
 |Azure  WASB |Uses Configuration.getPassword to get the WASB 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[Azure WASB 
Usage](../../hadoop-azure/index.html)|
+|Azure  ADLS |Uses Configuration.getPassword to get the ADLS 
credentials. They may be resolved through the credential provider API or from 
the config for backward compatibility.|[Azure ADLS 
Usage](../../hadoop-azure-datalake/index.html)|
 |Apache  Accumulo|The trace.password property is used by the Tracer to 
authenticate with Accumulo and persist the traces in the trace table. The 
credential provider API is used to acquire the trace.password from a provider 
or from configuration for backward compatibility.|TODO|
 |Apache  Slider  |A capability has been added to Slider to prompt the 
user for needed passwords and store them using CredentialProvider so they can 
be retrieved by an app later.|TODO|
 |Apache  Hive|Protection of the metastore password, SSL related 
passwords and JDO string password has been added through the use of the 
Credential Provider API|TODO|

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01624f0c/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index bd43c52..3d41025 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -58,10 +58,12 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.VersionInfo;
+
 import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 
 /**
@@ -224,8 +226,10 @@ public class AdlFileSystem extends FileSystem {
 return azureTokenProvider;
   }
 
-  private AccessTokenProvider getAccessTokenProvider(Configuration conf)
+  private AccessTokenProvider getAccessTokenProvider(Configuration config)
   throws IOException {
+Configuration conf = 

[06/10] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf149b9/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
new file mode 100644
index 000..b420daa
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+
+import com.squareup.okhttp.mockwebserver.MockResponse;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Stub adl server and test acl data conversion within SDK and Hadoop adl
+ * client.
+ */
+public class TestACLFeatures extends AdlMockWebServer {
+
+  @Test(expected=AccessControlException.class)
+  public void testModifyAclEntries() throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List<AclEntry> entries = new ArrayList<AclEntry>();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+aclEntryBuilder.setPermission(FsAction.ALL);
+aclEntryBuilder.setScope(AclEntryScope.ACCESS);
+entries.add(aclEntryBuilder.build());
+
+aclEntryBuilder.setName("hdfs");
+aclEntryBuilder.setType(AclEntryType.GROUP);
+aclEntryBuilder.setPermission(FsAction.READ_WRITE);
+aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
+entries.add(aclEntryBuilder.build());
+
+getMockAdlFileSystem().modifyAclEntries(new Path("/test1/test2"), entries);
+
+getMockServer().enqueue(new MockResponse().setResponseCode(403)
+.setBody(TestADLResponseData.getAccessControlException()));
+
+getMockAdlFileSystem()
+.modifyAclEntries(new Path("/test1/test2"), entries);
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testRemoveAclEntriesWithOnlyUsers()
+  throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List<AclEntry> entries = new ArrayList<AclEntry>();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+entries.add(aclEntryBuilder.build());
+
+getMockAdlFileSystem().removeAclEntries(new Path("/test1/test2"), entries);
+
+getMockServer().enqueue(new MockResponse().setResponseCode(403)
+.setBody(TestADLResponseData.getAccessControlException()));
+
+getMockAdlFileSystem()
+.removeAclEntries(new Path("/test1/test2"), entries);
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testRemoveAclEntries() throws URISyntaxException, IOException {
+getMockServer().enqueue(new MockResponse().setResponseCode(200));
+List<AclEntry> entries = new ArrayList<AclEntry>();
+AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
+aclEntryBuilder.setName("hadoop");
+aclEntryBuilder.setType(AclEntryType.USER);
+aclEntryBuilder.setPermission(FsAction.ALL);
+aclEntryBuilder.setScope(AclEntryScope.ACCESS);
+entries.add(aclEntryBuilder.build());
+
+aclEntryBuilder.setName("hdfs");
+aclEntryBuilder.setType(AclEntryType.GROUP);
+aclEntryBuilder.setPermission(FsAction.READ_WRITE);
+aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
+entries.add(aclEntryBuilder.build());
+
+

[02/10] hadoop git commit: HADOOP-13929. ADLS connector should not check in contract-test-options.xml. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13929. ADLS connector should not check in contract-test-options.xml. 
(John Zhuge via lei)

(cherry picked from commit 71c23c9fc94cfdf58de80effbc3f51c0925d0cfe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed4388f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed4388f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed4388f9

Branch: refs/heads/branch-2
Commit: ed4388f903d34a2c09845d200e1e717397b217e1
Parents: d3a6124
Author: Lei Xu 
Authored: Mon Feb 13 13:33:13 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:16:28 2017 -0800

--
 .gitignore  | 10 ++--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  2 +-
 .../src/site/markdown/index.md  |  4 +-
 .../fs/adl/live/AdlStorageConfiguration.java| 42 +++---
 .../src/test/resources/adls.xml | 11 
 .../test/resources/contract-test-options.xml| 61 
 6 files changed, 39 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed4388f9/.gitignore
--
diff --git a/.gitignore b/.gitignore
index eb1fc96..2a82ba7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,10 @@
 target
 build
 
+# Filesystem contract test options and credentials
+auth-keys.xml
+azure-auth-keys.xml
+
 # External tool builders
 */.externalToolBuilders
 */maven-eclipse.xml
@@ -22,12 +26,6 @@ build
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
-hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 yarnregistry.pdf
-hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
-hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed4388f9/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3d41025..303b7bc 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -72,7 +72,7 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
-  static final String SCHEME = "adl";
+  public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
   private String userName;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed4388f9/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ced5cff..5037db6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -224,7 +224,9 @@ commands demonstrate access to a storage account named 
`youraccount`.
 ## Testing the 
azure-datalake-store Module
 The hadoop-azure module includes a full suite of unit tests. Most of the tests 
will run without additional configuration by running mvn test. This includes 
tests against mocked storage, which is an in-memory emulation of Azure Data 
Lake Storage.
 
-A selection of tests can run against the Azure Data Lake Storage. To run tests 
against Adl storage. Please configure contract-test-options.xml with Adl 
account information mentioned in the above sections. Also turn on contract test 
execution flag to trigger tests against Azure Data Lake Storage.
+A selection of tests can run against the Azure Data Lake Storage. To run these
+tests, please create `src/test/resources/auth-keys.xml` with Adl account
+information mentioned in the above sections and the following properties.
 
 
 dfs.adl.test.contract.enable


[10/10] hadoop git commit: HADOOP-13962. Update ADLS SDK to 2.1.4. (John Zhuge via lei)

2017-03-09 Thread cdouglas
HADOOP-13962. Update ADLS SDK to 2.1.4. (John Zhuge via lei)

(cherry picked from commit ccf2d662443fc169835d66b715441529658b245c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3a6124f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3a6124f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3a6124f

Branch: refs/heads/branch-2
Commit: d3a6124ffaa76f0aeaad1a3c58e005a65f507a10
Parents: 01624f0
Author: Lei Xu 
Authored: Sat Jan 21 10:52:02 2017 +0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:16:28 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3a6124f/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index cd7980a..de5fa95 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -121,7 +121,7 @@
 
  <groupId>com.microsoft.azure</groupId>
  <artifactId>azure-data-lake-store-sdk</artifactId>
-  <version>2.0.11</version>
+  <version>2.1.4</version>
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/10] hadoop git commit: HADOOP-14017. User friendly name for ADLS user and group. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5cdcda34a -> acf20c831


HADOOP-14017. User friendly name for ADLS user and group. Contributed by 
Vishwajeet Dusane

(cherry picked from commit 924def78544a64449785f305cb6984c3559aea4d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acf20c83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acf20c83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acf20c83

Branch: refs/heads/branch-2
Commit: acf20c8318270eaa8fdf6259ec7b07344e3bd7e5
Parents: a146866
Author: Mingliang Liu 
Authored: Tue Feb 21 13:44:42 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:16:28 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  4 +++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 23 +++---
 .../src/site/markdown/index.md  | 26 +++
 .../fs/adl/TestValidateConfiguration.java   |  9 ++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 33 
 5 files changed, 91 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf20c83/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 21120df..7d31103 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -87,6 +87,10 @@ public final class AdlConfKeys {
   "adl.feature.support.acl.bit";
   static final boolean ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION_DEFAULT = true;
 
+  static final String ADL_ENABLEUPN_FOR_OWNERGROUP_KEY =
+  "adl.feature.ownerandgroup.enableupn";
+  static final boolean ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT = false;
+
   private AdlConfKeys() {
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acf20c83/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index fb0feda..e0e273e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -32,6 +32,7 @@ import com.microsoft.azure.datalake.store.DirectoryEntry;
 import com.microsoft.azure.datalake.store.DirectoryEntryType;
 import com.microsoft.azure.datalake.store.IfExists;
 import com.microsoft.azure.datalake.store.LatencyTracker;
+import com.microsoft.azure.datalake.store.UserGroupRepresentation;
 import com.microsoft.azure.datalake.store.oauth2.AccessTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import 
com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
@@ -80,6 +81,8 @@ public class AdlFileSystem extends FileSystem {
   private ADLStoreClient adlClient;
   private Path workingDirectory;
   private boolean aclBitStatus;
+  private UserGroupRepresentation oidOrUpn;
+
 
   // retained for tests
   private AccessTokenProvider tokenProvider;
@@ -181,6 +184,11 @@ public class AdlFileSystem extends FileSystem {
 if (!trackLatency) {
   LatencyTracker.disable();
 }
+
+boolean enableUPN = conf.getBoolean(ADL_ENABLEUPN_FOR_OWNERGROUP_KEY,
+ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT);
+oidOrUpn = enableUPN ? UserGroupRepresentation.UPN :
+UserGroupRepresentation.OID;
   }
 
   /**
@@ -439,7 +447,8 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public FileStatus getFileStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
-DirectoryEntry entry = adlClient.getDirectoryEntry(toRelativeFilePath(f));
+DirectoryEntry entry =
+adlClient.getDirectoryEntry(toRelativeFilePath(f), oidOrUpn);
 return toFileStatus(entry, f);
   }
 
@@ -456,7 +465,7 @@ public class AdlFileSystem extends FileSystem {
   public FileStatus[] listStatus(final Path f) throws IOException {
 statistics.incrementReadOps(1);
 List<DirectoryEntry> entries =
-adlClient.enumerateDirectory(toRelativeFilePath(f));
+adlClient.enumerateDirectory(toRelativeFilePath(f), oidOrUpn);
 return toFileStatuses(entries, f);
   }
 
@@ -749,8 

[05/10] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf149b9/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
new file mode 100644
index 000..8474e9c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Verify Adls CONCAT semantics compliance with Hadoop.
+ */
+public class TestAdlContractConcatLive extends AbstractContractConcatTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+return new AdlStorageContract(configuration);
+  }
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+org.junit.Assume
+.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+super.setup();
+  }
+
+  @Test
+  public void testConcatMissingTarget() throws Throwable {
+ContractTestUtils.unsupported("BUG : Adl to support expectation from "
++ "concat on missing targets.");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf149b9/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
new file mode 100644
index 000..907c50c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractCreateLive.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.adl.live;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Verify Adls CREATE semantics compliance with Hadoop.
+ */
+public class TestAdlContractCreateLive extends AbstractContractCreateTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+return new AdlStorageContract(configuration);
+  }
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+org.junit.Assume
+.assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
+super.setup();
+  }
+
+  @Test
+  public void testOverwriteEmptyDirectory() throws Throwable {
+ContractTestUtils
+.unsupported("BUG : Adl to support override empty " + 

[03/10] hadoop git commit: HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13257. Improve Azure Data Lake contract tests. Contributed by Vishwajeet 
Dusane

(cherry picked from commit 4113ec5fa5ca049ebaba039b1faf3911c6a34f7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd0556b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd0556b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd0556b

Branch: refs/heads/branch-2
Commit: 7fd0556b2bcc4eb18c5301a580646a2d7d502b50
Parents: edf149b
Author: Mingliang Liu 
Authored: Fri Dec 2 15:54:57 2016 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:16:28 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  24 +-
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   |   6 +-
 .../apache/hadoop/fs/adl/TestListStatus.java|   6 +-
 .../fs/adl/live/TestAdlContractAppendLive.java  |  11 +-
 .../fs/adl/live/TestAdlContractConcatLive.java  |  23 +-
 .../fs/adl/live/TestAdlContractCreateLive.java  |  19 +-
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  11 +-
 .../live/TestAdlContractGetFileStatusLive.java  |  36 ++
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  25 +-
 .../fs/adl/live/TestAdlContractOpenLive.java|  11 +-
 .../fs/adl/live/TestAdlContractRenameLive.java  |  30 +-
 .../fs/adl/live/TestAdlContractRootDirLive.java |  19 +-
 .../fs/adl/live/TestAdlContractSeekLive.java|  11 +-
 .../live/TestAdlDifferentSizeWritesLive.java|  69 ++--
 .../live/TestAdlFileContextCreateMkdirLive.java |  67 
 .../TestAdlFileContextMainOperationsLive.java   |  99 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  38 +--
 .../live/TestAdlInternalCreateNonRecursive.java | 134 
 .../fs/adl/live/TestAdlPermissionLive.java  | 116 +++
 .../adl/live/TestAdlSupportedCharsetInPath.java | 336 +++
 .../apache/hadoop/fs/adl/live/TestMetadata.java | 111 ++
 21 files changed, 996 insertions(+), 206 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd0556b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 9083afc..bd43c52 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -346,7 +346,6 @@ public class AdlFileSystem extends FileSystem {
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
-  @Deprecated
   @Override
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
   EnumSet<CreateFlag> flags, int bufferSize, short replication,
@@ -471,6 +470,10 @@ public class AdlFileSystem extends FileSystem {
   @Override
   public boolean rename(final Path src, final Path dst) throws IOException {
 statistics.incrementWriteOps(1);
+if (toRelativeFilePath(src).equals("/")) {
+  return false;
+}
+
 return adlClient.rename(toRelativeFilePath(src), toRelativeFilePath(dst));
   }
 
@@ -522,9 +525,24 @@ public class AdlFileSystem extends FileSystem {
   public boolean delete(final Path path, final boolean recursive)
   throws IOException {
 statistics.incrementWriteOps(1);
+String relativePath = toRelativeFilePath(path);
+// Delete on root directory not supported.
+if (relativePath.equals("/")) {
+  // This is important check after recent commit
+  // HADOOP-12977 and HADOOP-13716 validates on root for
+  // 1. if root is empty and non recursive delete then return false.
+  // 2. if root is non empty and non recursive delete then throw exception.
+  if (!recursive
+  && adlClient.enumerateDirectory(toRelativeFilePath(path), 1).size()
+  > 0) {
+throw new IOException("Delete on root is not supported.");
+  }
+  return false;
+}
+
 return recursive ?
-adlClient.deleteRecursive(toRelativeFilePath(path)) :
-adlClient.delete(toRelativeFilePath(path));
+adlClient.deleteRecursive(relativePath) :
+adlClient.delete(relativePath);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd0556b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
 

[07/10] hadoop git commit: HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. Contributed by Vishwajeet Dusane

2017-03-09 Thread cdouglas
HADOOP-13037. Refactor Azure Data Lake Store as an independent FileSystem. 
Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edf149b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edf149b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edf149b9

Branch: refs/heads/branch-2
Commit: edf149b9790a96563fe7bba289a040542c8ab8f2
Parents: 5cdcda3
Author: Chris Douglas 
Authored: Wed Mar 8 23:18:28 2017 -0800
Committer: Chris Douglas 
Committed: Thu Mar 9 15:16:28 2017 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml  | 168 
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |  56 ++
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  92 ++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 923 +++
 .../apache/hadoop/fs/adl/AdlFsInputStream.java  | 149 +++
 .../apache/hadoop/fs/adl/AdlFsOutputStream.java |  82 ++
 .../org/apache/hadoop/fs/adl/AdlPermission.java |  69 ++
 .../hadoop/fs/adl/SdkTokenProviderAdapter.java  |  41 +
 .../apache/hadoop/fs/adl/TokenProviderType.java |  25 +
 .../fs/adl/oauth2/AzureADTokenProvider.java |  70 ++
 .../hadoop/fs/adl/oauth2/package-info.java  |  23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |  23 +
 .../META-INF/org.apache.hadoop.fs.FileSystem|  16 +
 .../src/site/markdown/index.md  | 193 
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  |  99 ++
 .../apache/hadoop/fs/adl/TestACLFeatures.java   | 262 ++
 .../hadoop/fs/adl/TestADLResponseData.java  | 147 +++
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   | 196 
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 133 +++
 .../adl/TestConcurrentDataReadOperations.java   | 299 ++
 .../hadoop/fs/adl/TestCustomTokenProvider.java  | 136 +++
 .../apache/hadoop/fs/adl/TestGetFileStatus.java |  70 ++
 .../apache/hadoop/fs/adl/TestListStatus.java| 103 +++
 .../fs/adl/TestRelativePathFormation.java   |  61 ++
 .../fs/adl/TestValidateConfiguration.java   | 103 +++
 .../hadoop/fs/adl/TestableAdlFileSystem.java|  30 +
 .../fs/adl/common/CustomMockTokenProvider.java  |  61 ++
 .../hadoop/fs/adl/common/ExpectedResponse.java  |  71 ++
 .../hadoop/fs/adl/common/Parallelized.java  |  60 ++
 .../hadoop/fs/adl/common/TestDataForRead.java   | 122 +++
 .../fs/adl/live/AdlStorageConfiguration.java|  94 ++
 .../hadoop/fs/adl/live/AdlStorageContract.java  |  66 ++
 .../fs/adl/live/TestAdlContractAppendLive.java  |  53 ++
 .../fs/adl/live/TestAdlContractConcatLive.java  |  52 ++
 .../fs/adl/live/TestAdlContractCreateLive.java  |  52 ++
 .../fs/adl/live/TestAdlContractDeleteLive.java  |  44 +
 .../fs/adl/live/TestAdlContractMkdirLive.java   |  55 ++
 .../fs/adl/live/TestAdlContractOpenLive.java|  44 +
 .../fs/adl/live/TestAdlContractRenameLive.java  |  63 ++
 .../fs/adl/live/TestAdlContractRootDirLive.java |  52 ++
 .../fs/adl/live/TestAdlContractSeekLive.java|  44 +
 .../live/TestAdlDifferentSizeWritesLive.java| 102 ++
 .../adl/live/TestAdlFileSystemContractLive.java |  94 ++
 .../src/test/resources/adls.xml | 140 +++
 .../test/resources/contract-test-options.xml|  61 ++
 .../src/test/resources/log4j.properties |  30 +
 hadoop-tools/pom.xml|   1 +
 47 files changed, 4930 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf149b9/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
new file mode 100644
index 000..01d101f
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -0,0 +1,168 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project
+2.9.0-SNAPSHOT
+../../hadoop-project
+  
+  org.apache.hadoop
+  hadoop-azure-datalake
+  Apache Hadoop Azure Data Lake support
+  
+This module contains code to support integration with Azure Data Lake.
+  
+  jar
+  
+2.4.0
+0.9.1
+UTF-8
+true
+  
+  
+
+  snapshots-repo
+  https://oss.sonatype.org/content/repositories/snapshots
+  false
+  true
+
+  
+  
+
+  
+org.apache.maven.plugins
+maven-project-info-reports-plugin
+
+
+  false
+  false
+  
+
+  
+  
+org.apache.maven.plugins
+maven-jar-plugin
+
+  
+
+  test-jar
+
+  
+
+ 

hadoop git commit: HDFS-11477. Simplify file IO profiling configuration. Contributed by Hanisha Koneru.

2017-03-09 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b6c477691 -> 5cdcda34a


HDFS-11477. Simplify file IO profiling configuration. Contributed by Hanisha 
Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cdcda34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cdcda34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cdcda34

Branch: refs/heads/branch-2
Commit: 5cdcda34a2e859862eb68bff5844c18ac4d27917
Parents: b6c4776
Author: Arpit Agarwal 
Authored: Thu Mar 9 14:34:10 2017 -0800
Committer: Arpit Agarwal 
Committed: Thu Mar 9 14:34:10 2017 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 ++
 .../apache/hadoop/hdfs/server/common/Util.java  | 26 +---
 .../hadoop/hdfs/server/datanode/DNConf.java |  7 +++---
 .../hdfs/server/datanode/FileIoProvider.java|  2 +-
 .../server/datanode/ProfilingFileIoEvents.java  | 11 -
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../datanode/TestDataNodeVolumeMetrics.java |  4 +--
 .../hadoop/tools/TestHdfsConfigFields.java  |  2 --
 9 files changed, 43 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cdcda34/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index e52a6a7..020d1ad 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -323,7 +323,12 @@ Each metrics record contains tags such as SessionId and 
Hostname as additional i
 FsVolume
 
 
-Per-volume metrics contain Datanode Volume IO related statistics. Per-volume 
metrics are off by default. They can be enbabled by setting 
`dfs.datanode.enable.fileio.profiling` to **true**, but enabling per-volume 
metrics may have a performance impact. Each metrics record contains tags such 
as Hostname as additional information along with metrics.
+Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
+metrics are off by default. They can be enabled by setting `dfs.datanode
+.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
+Setting this value to 0.0 would mean profiling is not enabled. But enabling
+per-volume metrics may have a performance impact. Each metrics record
+contains tags such as Hostname as additional information along with metrics.
 
 | Name | Description |
 |: |: |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cdcda34/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e9e6bcc..4732359 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -634,7 +634,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int 
DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT = 1;
 
   public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY =
-  "dfs.datanode.slow.peers.report.interval";
+  "dfs.datanode.outliers.report.interval";
   public static final int DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT =
   1800 * 1000;
 
@@ -664,10 +664,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT = 64 * 1024;
 
   // Datanode File IO Stats
-  public static final String DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY =
-  "dfs.datanode.enable.fileio.profiling";
-  public static final boolean DFS_DATANODE_ENABLE_FILEIO_PROFILING_DEFAULT =
-  false;
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
   "dfs.datanode.enable.fileio.fault.injection";
   public static final boolean
@@ -676,7 +672,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
   "dfs.datanode.fileio.profiling.sampling.fraction";
   public static final double
-  DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT = 1.0;
+  DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
 
   //Keys with no defaults
   public static final 

hadoop git commit: HDFS-11461. DataNode Disk Outlier Detection. Contributed by Hanisha Koneru.

2017-03-09 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 60be2e5d8 -> b6c477691


HDFS-11461. DataNode Disk Outlier Detection. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6c47769
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6c47769
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6c47769

Branch: refs/heads/branch-2
Commit: b6c4776911c264b1e68e85100487e4456705e775
Parents: 60be2e5
Author: Arpit Agarwal 
Authored: Thu Mar 9 12:59:48 2017 -0800
Committer: Arpit Agarwal 
Committed: Thu Mar 9 12:59:48 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +-
 .../server/blockmanagement/SlowPeerTracker.java |   4 +-
 .../hdfs/server/datanode/BPServiceActor.java|   2 +-
 .../hadoop/hdfs/server/datanode/DNConf.java |  16 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  10 +
 .../datanode/metrics/DataNodeDiskMetrics.java   | 181 +
 .../datanode/metrics/DataNodePeerMetrics.java   |   6 +-
 .../datanode/metrics/OutlierDetector.java   | 182 +
 .../datanode/metrics/SlowNodeDetector.java  | 194 ---
 .../src/main/resources/hdfs-default.xml |   2 +-
 .../TestDataNodeOutlierDetectionViaMetrics.java |   6 +-
 .../datanode/metrics/TestSlowNodeDetector.java  |  30 +--
 12 files changed, 411 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6c47769/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 853306b..e9e6bcc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -633,9 +633,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT = 
"dfs.block.misreplication.processing.limit";
   public static final int 
DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT = 1;
 
-  public static final String DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_KEY =
+  public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY =
   "dfs.datanode.slow.peers.report.interval";
-  public static final int DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_DEFAULT =
+  public static final int DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT =
   1800 * 1000;
 
   // property for fsimage compression

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6c47769/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
index a1ffd20..c8a6348 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
@@ -94,8 +94,8 @@ public class SlowPeerTracker {
 this.timer = timer;
 this.allReports = new ConcurrentHashMap<>();
 this.reportValidityMs = conf.getTimeDuration(
-DFSConfigKeys.DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_KEY,
-DFSConfigKeys.DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_DEFAULT,
+DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
+DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
 TimeUnit.MILLISECONDS) * 3;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6c47769/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index c605588..adbf025 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -129,7 +129,7 @@ class BPServiceActor implements 

hadoop git commit: YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed by Yuanbo Liu via Daniel Templeton)

2017-03-09 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 385d2cb77 -> 822a74f2a


YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed 
by Yuanbo Liu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/822a74f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/822a74f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/822a74f2

Branch: refs/heads/trunk
Commit: 822a74f2ae955ea0893cc02fb36ceb49ceba8014
Parents: 385d2cb
Author: Daniel Templeton 
Authored: Thu Mar 9 12:12:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 12:14:33 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/822a74f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 31dd7fe..028eea6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -96,7 +96,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdate
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 
 
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -119,7 +118,6 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 
@@ -130,8 +128,6 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   private final int GB = 1024;
   private final static String ALLOC_FILE =
   new File(TEST_DIR, "test-queues").getAbsolutePath();
-  private final static ContainerUpdates NULL_UPDATE_REQUESTS =
-  new ContainerUpdates();
 
   @Before
   public void setUp() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11513. Ozone: Separate XceiverServer and XceiverClient into interfaces and implementations.

2017-03-09 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 5e95bdeef -> fbc8099b6


HDFS-11513. Ozone: Separate XceiverServer and XceiverClient into interfaces and 
implementations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbc8099b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbc8099b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbc8099b

Branch: refs/heads/HDFS-7240
Commit: fbc8099b636b186b749e9b4404005890d7c0b069
Parents: 5e95bde
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Mar 9 11:03:20 2017 -0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Mar 9 11:03:20 2017 -0800

--
 .../org/apache/hadoop/scm/XceiverClient.java| 24 ++---
 .../apache/hadoop/scm/XceiverClientManager.java | 12 ++---
 .../org/apache/hadoop/scm/XceiverClientSpi.java | 56 
 .../scm/client/ContainerOperationClient.java|  4 +-
 .../hadoop/scm/storage/ChunkInputStream.java|  6 +--
 .../hadoop/scm/storage/ChunkOutputStream.java   |  6 +--
 .../scm/storage/ContainerProtocolCalls.java | 16 +++---
 .../common/transport/server/XceiverServer.java  | 14 ++---
 .../transport/server/XceiverServerSpi.java  | 30 +++
 .../container/ozoneimpl/OzoneContainer.java |  3 +-
 .../web/storage/DistributedStorageHandler.java  | 49 +++--
 .../ozone/scm/TestContainerSmallFile.java   |  8 +--
 12 files changed, 140 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc8099b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
index e1a1a8b..c6e47c8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
@@ -33,14 +33,13 @@ import 
org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
 /**
  * A Client for the storageContainer protocol.
  */
-public class XceiverClient implements Closeable {
+public class XceiverClient implements XceiverClientSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class);
   private final Pipeline pipeline;
   private final Configuration config;
@@ -61,9 +60,7 @@ public class XceiverClient implements Closeable {
 this.config = config;
   }
 
-  /**
-   * Connects to the leader in the pipeline.
-   */
+  @Override
   public void connect() throws Exception {
 if (channelFuture != null
 && channelFuture.channel() != null
@@ -90,9 +87,6 @@ public class XceiverClient implements Closeable {
 channelFuture = b.connect(leader.getHostName(), port).sync();
   }
 
-  /**
-   * Close the client.
-   */
   @Override
   public void close() {
 if(group != null) {
@@ -104,22 +98,12 @@ public class XceiverClient implements Closeable {
 }
   }
 
-  /**
-   * Returns the pipeline of machines that host the container used by this
-   * client.
-   *
-   * @return pipeline of machines that host the container
-   */
+  @Override
   public Pipeline getPipeline() {
 return pipeline;
   }
 
-  /**
-   * Sends a given command to server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
+  @Override
   public ContainerProtos.ContainerCommandResponseProto sendCommand(
   ContainerProtos.ContainerCommandRequestProto request)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc8099b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
index de706cb..82e7e2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
@@ -96,7 +96,7 @@ public class XceiverClientManager {
* @return XceiverClient connected to a container
* @throws IOException if an XceiverClient cannot be acquired
*/
-  

hadoop git commit: HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by Lukas Majercak and Manoj Govindassamy.

2017-03-09 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4a1187238 -> 60be2e5d8


HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by 
Lukas Majercak and Manoj Govindassamy.

(cherry picked from commit 385d2cb777a0272ac20c62336c944fad295d5d12)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60be2e5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60be2e5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60be2e5d

Branch: refs/heads/branch-2
Commit: 60be2e5d8a1a6a8921c68f8b0f428b55152d05db
Parents: 4a11872
Author: Masatake Iwasaki 
Authored: Thu Mar 9 13:30:33 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Mar 9 23:37:04 2017 +0900

--
 .../server/blockmanagement/BlockManager.java| 10 +++-
 .../apache/hadoop/hdfs/TestDecommission.java| 48 ++
 .../hadoop/hdfs/TestMaintenanceState.java   | 51 
 3 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60be2e5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dad82d2..5d5706d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -801,7 +801,15 @@ public class BlockManager implements BlockStatsMXBean {
   return false; // already completed (e.g. by syncBlock)
 
 final boolean committed = commitBlock(lastBlock, commitBlock);
-if (countNodes(lastBlock).liveReplicas() >= minReplication) {
+
+// Count replicas on decommissioning nodes, as these will not be
+// decommissioned unless recovery/completing last block has finished
+NumberReplicas numReplicas = countNodes(lastBlock);
+int numUsableReplicas = numReplicas.liveReplicas() +
+numReplicas.decommissioning() +
+numReplicas.liveEnteringMaintenanceReplicas();
+
+if (numUsableReplicas >= minReplication) {
   if (committed) {
 addExpectedReplicasToPending(lastBlock);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60be2e5d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 5551782..b34f047 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -33,6 +33,7 @@ import java.util.concurrent.ExecutionException;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -647,6 +648,53 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 fdos.close();
   }
+
+  @Test(timeout = 36)
+  public void testDecommissionWithOpenFileAndBlockRecovery()
+  throws IOException, InterruptedException {
+startCluster(1, 6);
+getCluster().waitActive();
+
+Path file = new Path("/testRecoveryDecommission");
+
+// Create a file and never close the output stream to trigger recovery
+DistributedFileSystem dfs = getCluster().getFileSystem();
+FSDataOutputStream out = dfs.create(file, true,
+getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 
4096),
+(short) 3, blockSize);
+
+// Write data to the file
+long writtenBytes = 0;
+while (writtenBytes < fileSize) {
+  out.writeLong(writtenBytes);
+  writtenBytes += 8;
+}
+out.hsync();
+
+DatanodeInfo[] lastBlockLocations = NameNodeAdapter.getBlockLocations(
+  getCluster().getNameNode(), "/testRecoveryDecommission", 0, fileSize)
+  .getLastLocatedBlock().getLocations();
+
+// Decommission all nodes of the last block
+

hadoop git commit: HADOOP-13345. S3Guard: Ensure GenericOptionsParser is used for S3Guard CLI

2017-03-09 Thread mackrorysd
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 3a8f307a8 -> b968fb34c


HADOOP-13345. S3Guard: Ensure GenericOptionsParser is used for S3Guard CLI


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b968fb34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b968fb34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b968fb34

Branch: refs/heads/HADOOP-13345
Commit: b968fb34c2c3e8ad9b30c63397733891cd05494e
Parents: 3a8f307
Author: Sean Mackrory 
Authored: Thu Mar 9 07:47:39 2017 -0700
Committer: Sean Mackrory 
Committed: Thu Mar 9 07:47:39 2017 -0700

--
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  | 33 ++--
 1 file changed, 24 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b968fb34/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index 0fc3a4a..cab989c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.s3a.Constants;
 import org.apache.hadoop.fs.s3a.S3AFileStatus;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
@@ -59,7 +60,12 @@ public abstract class S3GuardTool extends Configured 
implements Tool {
   private static final String NAME = "s3guard";
   private static final String COMMON_USAGE =
   "When possible and not overridden by more specific options, metadata\n" +
-  "repository information will be inferred from the S3A URL (if provided)";
+  "repository information will be inferred from the S3A URL (if provided)" 
+
+  "\n\n" +
+  "Generic options supported are:\n" +
+  "  -conf  - specify an application configuration file\n" +
+  "  -D 

hadoop git commit: HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by Lukas Majercak and Manoj Govindassamy.

2017-03-09 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk 570827a81 -> 385d2cb77


HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by 
Lukas Majercak and Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/385d2cb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/385d2cb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/385d2cb7

Branch: refs/heads/trunk
Commit: 385d2cb777a0272ac20c62336c944fad295d5d12
Parents: 570827a
Author: Masatake Iwasaki 
Authored: Thu Mar 9 13:30:33 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Mar 9 21:13:50 2017 +0900

--
 .../server/blockmanagement/BlockManager.java| 10 +++-
 .../apache/hadoop/hdfs/TestDecommission.java| 48 ++
 .../hadoop/hdfs/TestMaintenanceState.java   | 51 
 3 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d2cb7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9ec28f9..5dc40fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -891,7 +891,15 @@ public class BlockManager implements BlockStatsMXBean {
   lastBlock.getUnderConstructionFeature()
   .updateStorageScheduledSize((BlockInfoStriped) lastBlock);
 }
-if (hasMinStorage(lastBlock)) {
+
+// Count replicas on decommissioning nodes, as these will not be
+// decommissioned unless recovery/completing last block has finished
+NumberReplicas numReplicas = countNodes(lastBlock);
+int numUsableReplicas = numReplicas.liveReplicas() +
+numReplicas.decommissioning() +
+numReplicas.liveEnteringMaintenanceReplicas();
+
+if (hasMinStorage(lastBlock, numUsableReplicas)) {
   if (committed) {
 addExpectedReplicasToPending(lastBlock);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d2cb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 94e8946..dc0edcc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -33,6 +33,7 @@ import java.util.concurrent.ExecutionException;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -646,6 +647,53 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 fdos.close();
   }
+
+  @Test(timeout = 36)
+  public void testDecommissionWithOpenFileAndBlockRecovery()
+  throws IOException, InterruptedException {
+startCluster(1, 6);
+getCluster().waitActive();
+
+Path file = new Path("/testRecoveryDecommission");
+
+// Create a file and never close the output stream to trigger recovery
+DistributedFileSystem dfs = getCluster().getFileSystem();
+FSDataOutputStream out = dfs.create(file, true,
+getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 
4096),
+(short) 3, blockSize);
+
+// Write data to the file
+long writtenBytes = 0;
+while (writtenBytes < fileSize) {
+  out.writeLong(writtenBytes);
+  writtenBytes += 8;
+}
+out.hsync();
+
+DatanodeInfo[] lastBlockLocations = NameNodeAdapter.getBlockLocations(
+  getCluster().getNameNode(), "/testRecoveryDecommission", 0, fileSize)
+  .getLastLocatedBlock().getLocations();
+
+// Decommission all nodes of the last block
+ArrayList toDecom = new ArrayList<>();
+for (DatanodeInfo dnDecom : lastBlockLocations) {
+  toDecom.add(dnDecom.getXferAddr());
+}
+initExcludeHosts(toDecom);
+refreshNodes(0);
+
+// Make sure hard lease expires to