[1/2] hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

2016-10-13 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a6197c1fa -> b60e545a0
  refs/heads/trunk 9454dc5e8 -> 29aa11b1a


HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29aa11b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29aa11b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29aa11b1

Branch: refs/heads/trunk
Commit: 29aa11b1a252e007ed62fad362096ca43aa408af
Parents: 9454dc5
Author: Brahma Reddy Battula 
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 13 21:39:50 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +--
 2 files changed, 51 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29aa11b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 32401dc..a60f24b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -936,8 +936,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2207,7 +2206,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2218,8 +2217,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29aa11b1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
 namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
   @Test(timeout = 3)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
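
Note: the pattern is the same in all three dfsadmin subcommands touched above (the balancer-bandwidth query, -evictWriters and -getDatanodeInfo). Instead of printing a bare "Datanode unreachable." and returning -1, each command rethrows the IOException with the original exception as its cause, so the caller's generic error handling can report the detailed message. A minimal, self-contained sketch of the idea; callDatanode() is a hypothetical stand-in for the real datanode RPC, not DFSAdmin code.

import java.io.IOException;

public class DatanodeUnreachableSketch {
  // Hypothetical stand-in for the ClientDatanodeProtocol RPC call.
  static void callDatanode() throws IOException {
    throw new IOException("Connection refused: /127.0.0.1:9867");
  }

  static int getDatanodeInfo() throws IOException {
    try {
      callDatanode();
    } catch (IOException ioe) {
      // Before: System.err.println("Datanode unreachable."); return -1;
      // After: keep the root cause so the caller can report the details.
      throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
    return 0;
  }

  public static void main(String[] args) {
    try {
      getDatanodeInfo();
    } catch (IOException e) {
      // A DFSAdmin-style driver prints the whole message, e.g.
      // "Datanode unreachable. java.io.IOException: Connection refused: ..."
      System.err.println(e.getMessage());
    }
  }
}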

[2/2] hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

2016-10-13 Thread brahma
HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu

(cherry picked from commit 29aa11b1a252e007ed62fad362096ca43aa408af)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b60e545a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b60e545a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b60e545a

Branch: refs/heads/branch-2
Commit: b60e545a08140b2e1c268a2310e80d6ed659eb39
Parents: a6197c1
Author: Brahma Reddy Battula 
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 13 21:41:25 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +--
 2 files changed, 51 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60e545a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index bd3ed15..3d956a0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -911,8 +911,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2178,7 +2177,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2189,8 +2188,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60e545a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
 namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
   @Test(timeout = 3)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+final DFSAdmin dfsAdmin = new DFSAdmin(conf);
 
-  /* 
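
Note: on the test side, TestDFSAdmin now reuses the shared two-datanode MiniDFSCluster and caps ipc.client.connect.max.retries at 3, so the negative-path checks against an unreachable datanode address fail quickly instead of sitting through the default retry count. A rough, hedged sketch of that setup as a standalone JUnit test; the unreachable address and the assertion are illustrative, not the test's actual code.

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
import static org.junit.Assert.assertNotEquals;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.Test;

public class FailFastDfsAdminSketch {
  @Test(timeout = 60000)
  public void getDatanodeInfoAgainstDeadAddress() throws Exception {
    Configuration conf = new Configuration();
    // Bound the IPC connect retries; otherwise probing a dead address is slow.
    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build()) {
      cluster.waitActive();
      DFSAdmin dfsAdmin = new DFSAdmin(conf);
      // Hypothetical address with nothing listening on it.
      int ret = dfsAdmin.run(new String[] {"-getDatanodeInfo", "127.0.0.1:1"});
      // The command still exits non-zero, but the detailed IOException is now
      // part of the reported error rather than a bare "Datanode unreachable.".
      assertNotEquals(0, ret);
    }
  }
}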

hadoop git commit: HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.

2016-10-13 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk b371c5636 -> 9097e2efe


HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN 
based on client request. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9097e2ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9097e2ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9097e2ef

Branch: refs/heads/trunk
Commit: 9097e2efe4c92d83c8fab88dc11be84505a6cab5
Parents: b371c56
Author: Xiaoyu Yao 
Authored: Thu Oct 13 10:52:13 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 10:52:28 2016 -0700

--
 .../authentication/server/KerberosAuthenticationHandler.java  | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9097e2ef/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index c6d1881..07c2a31 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -343,8 +343,6 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
   authorization = 
authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
   final Base64 base64 = new Base64(0);
   final byte[] clientToken = base64.decode(authorization);
-  final String serverName = InetAddress.getByName(request.getServerName())
-   .getCanonicalHostName();
   try {
 token = Subject.doAs(serverSubject, new 
PrivilegedExceptionAction() {
 
@@ -354,10 +352,7 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
 GSSContext gssContext = null;
 GSSCredential gssCreds = null;
 try {
-  gssCreds = gssManager.createCredential(
-  gssManager.createName(
-  KerberosUtil.getServicePrincipal("HTTP", serverName),
-  KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+  gssCreds = gssManager.createCredential(null,
   GSSCredential.INDEFINITE_LIFETIME,
   new Oid[]{
 KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
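
Note: the fix above stops deriving the server principal from the client-supplied Host header. Passing null as the name to GSSManager.createCredential lets the SPNEGO acceptor use whatever HTTP/* principals are already in the server's login Subject (i.e. its keytab), so requests addressed to any of the host's names can still be authenticated. A minimal sketch of acquiring acceptor credentials that way; serverSubject is assumed to have been populated by a keytab login, and the OID literal is the standard SPNEGO mechanism OID.

import java.security.PrivilegedExceptionAction;
import javax.security.auth.Subject;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.Oid;

public final class AcceptorCredentialSketch {
  static GSSCredential acquire(Subject serverSubject) throws Exception {
    return Subject.doAs(serverSubject,
        (PrivilegedExceptionAction<GSSCredential>) () -> {
          GSSManager gssManager = GSSManager.getInstance();
          Oid spnegoOid = new Oid("1.3.6.1.5.5.2"); // SPNEGO mechanism
          // name == null: accept for any principal in the Subject, instead of
          // rebuilding "HTTP/<canonical host>" from the incoming request.
          return gssManager.createCredential(null,
              GSSCredential.INDEFINITE_LIFETIME,
              new Oid[] {spnegoOid},
              GSSCredential.ACCEPT_ONLY);
        });
  }
}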





hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu [Forced Update!]

2016-10-13 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 29aa11b1a -> 129125404 (forced update)


HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12912540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12912540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12912540

Branch: refs/heads/trunk
Commit: 129125404244f35ee63b8f0491a095371685e9ba
Parents: 9454dc5
Author: Brahma Reddy Battula 
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 13 22:05:00 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +--
 2 files changed, 51 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 32401dc..a60f24b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -936,8 +936,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2207,7 +2206,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2218,8 +2217,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
 namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
   @Test(timeout = 3)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+final DFSAdmin dfsAdmin = 

hadoop git commit: HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.

2016-10-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 129125404 -> b371c5636


HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. 
Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b371c563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b371c563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b371c563

Branch: refs/heads/trunk
Commit: b371c56365c14bbab0f5cdfffc0becaabfde8145
Parents: 1291254
Author: Anu Engineer 
Authored: Thu Oct 13 10:26:07 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 10:26:07 2016 -0700

--
 .../server/diskbalancer/TestDiskBalancer.java   | 44 +---
 1 file changed, 11 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b371c563/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index d911e74..9985210 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -44,7 +44,6 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -137,6 +136,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -144,6 +144,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
@@ -174,7 +175,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
-
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -182,9 +183,9 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap, cap})
 .build();
 
-
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
   sourceDiskIndex, conf, blockSize, blockCount);
@@ -221,6 +222,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -228,6 +230,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 
 try {
@@ -246,24 +249,6 @@ public class TestDiskBalancer {
   }
 
   /**
-   * Sets alll Disks capacity to size specified.
-   *
-   * @param cluster - DiskBalancerCluster
-   * @param size- new size of the disk
-   */
-  private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
- String diskType) {
-Preconditions.checkNotNull(cluster);
-for (DiskBalancerDataNode node : cluster.getNodes()) {
-  for (DiskBalancerVolume vol :
-  node.getVolumeSets().get(diskType).getVolumes()) {
-vol.setCapacity(size);
-  }
-  node.getVolumeSets().get(diskType).computeVolumeDataDensity();
-}
-  }
-
-  /**
* Helper class that allows us to create different kinds of MiniDFSClusters
* and populate data.
*/
@@ -274,6 +259,7 @@ public class TestDiskBalancer {
 private int fileLen;
 private int blockCount;
 private 

hadoop git commit: HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.

2016-10-13 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b60e545a0 -> 874402a29


HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN 
based on client request. Contributed by Xiaoyu Yao.

(cherry picked from commit 9097e2efe4c92d83c8fab88dc11be84505a6cab5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/874402a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/874402a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/874402a2

Branch: refs/heads/branch-2
Commit: 874402a295d2f601cce43f4d4b084c02387a3b68
Parents: b60e545
Author: Xiaoyu Yao 
Authored: Thu Oct 13 10:52:13 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 10:54:51 2016 -0700

--
 .../authentication/server/KerberosAuthenticationHandler.java  | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/874402a2/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index c6d1881..07c2a31 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -343,8 +343,6 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
   authorization = 
authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
   final Base64 base64 = new Base64(0);
   final byte[] clientToken = base64.decode(authorization);
-  final String serverName = InetAddress.getByName(request.getServerName())
-   .getCanonicalHostName();
   try {
 token = Subject.doAs(serverSubject, new 
PrivilegedExceptionAction() {
 
@@ -354,10 +352,7 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
 GSSContext gssContext = null;
 GSSCredential gssCreds = null;
 try {
-  gssCreds = gssManager.createCredential(
-  gssManager.createName(
-  KerberosUtil.getServicePrincipal("HTTP", serverName),
-  KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+  gssCreds = gssManager.createCredential(null,
   GSSCredential.INDEFINITE_LIFETIME,
   new Oid[]{
 KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),





hadoop git commit: HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.

2016-10-13 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 78777d4a9 -> 5abc78882


HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN 
based on client request. Contributed by Xiaoyu Yao.

(cherry picked from commit 9097e2efe4c92d83c8fab88dc11be84505a6cab5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5abc7888
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5abc7888
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5abc7888

Branch: refs/heads/branch-2.8
Commit: 5abc78882590bc3c0dfb71cba2f08fd671bc1e33
Parents: 78777d4
Author: Xiaoyu Yao 
Authored: Thu Oct 13 10:52:13 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 10:58:20 2016 -0700

--
 .../authentication/server/KerberosAuthenticationHandler.java  | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5abc7888/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index c6d1881..07c2a31 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -343,8 +343,6 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
   authorization = 
authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
   final Base64 base64 = new Base64(0);
   final byte[] clientToken = base64.decode(authorization);
-  final String serverName = InetAddress.getByName(request.getServerName())
-   .getCanonicalHostName();
   try {
 token = Subject.doAs(serverSubject, new 
PrivilegedExceptionAction() {
 
@@ -354,10 +352,7 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
 GSSContext gssContext = null;
 GSSCredential gssCreds = null;
 try {
-  gssCreds = gssManager.createCredential(
-  gssManager.createName(
-  KerberosUtil.getServicePrincipal("HTTP", serverName),
-  KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+  gssCreds = gssManager.createCredential(null,
   GSSCredential.INDEFINITE_LIFETIME,
   new Oid[]{
 KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),





[2/3] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

2016-10-13 Thread arp
HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception 
stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecd3467f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecd3467f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecd3467f

Branch: refs/heads/branch-2
Commit: ecd3467f1197167427c64124486aaee3ba24f606
Parents: 874402a
Author: Arpit Agarwal 
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 13 11:37:12 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecd3467f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements 
Closeable, GetSpaceUsed {
   // update the used variable
   spaceUsed.refresh();
 } catch (InterruptedException e) {
-  LOG.warn("Thread Interrupted waiting to refresh disk information", 
e);
+  LOG.warn("Thread Interrupted waiting to refresh disk information: "
+  + e.getMessage());
   Thread.currentThread().interrupt();
 }
   }





[1/3] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

2016-10-13 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 874402a29 -> ecd3467f1
  refs/heads/branch-2.8 5abc78882 -> 3a9808e96
  refs/heads/trunk 9097e2efe -> 008122b3c


HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception 
stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/008122b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/008122b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/008122b3

Branch: refs/heads/trunk
Commit: 008122b3c927767ac96dc876124bc591e10c9df4
Parents: 9097e2e
Author: Arpit Agarwal 
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 13 11:37:03 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/008122b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements 
Closeable, GetSpaceUsed {
   // update the used variable
   spaceUsed.refresh();
 } catch (InterruptedException e) {
-  LOG.warn("Thread Interrupted waiting to refresh disk information", 
e);
+  LOG.warn("Thread Interrupted waiting to refresh disk information: "
+  + e.getMessage());
   Thread.currentThread().interrupt();
 }
   }
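
Note: the refresh thread in CachingGetSpaceUsed is interrupted as part of normal shutdown, so a full stack trace in the log is noise. The change logs only the exception message and restores the interrupt flag so the loop can exit. A small, self-contained sketch of that idiom (using slf4j; this is not the class's actual refresh loop):

import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RefreshLoopSketch implements Runnable {
  private static final Logger LOG =
      LoggerFactory.getLogger(RefreshLoopSketch.class);

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        TimeUnit.SECONDS.sleep(10);   // stand-in for the refresh interval
        // the du/df refresh would happen here
      } catch (InterruptedException e) {
        // Expected on shutdown: log the message only, no stack trace, and
        // restore the interrupt status so the while-condition sees it.
        LOG.warn("Thread Interrupted waiting to refresh disk information: "
            + e.getMessage());
        Thread.currentThread().interrupt();
      }
    }
  }
}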





[3/3] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

2016-10-13 Thread arp
HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception 
stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a9808e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a9808e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a9808e9

Branch: refs/heads/branch-2.8
Commit: 3a9808e96c734ea4a79dc5b6d5524d349a55fb90
Parents: 5abc788
Author: Arpit Agarwal 
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 13 11:37:25 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9808e9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements 
Closeable, GetSpaceUsed {
   // update the used variable
   spaceUsed.refresh();
 } catch (InterruptedException e) {
-  LOG.warn("Thread Interrupted waiting to refresh disk information", 
e);
+  LOG.warn("Thread Interrupted waiting to refresh disk information: "
+  + e.getMessage());
   Thread.currentThread().interrupt();
 }
   }





hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk fdce51509 -> 332a61fd7


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/trunk
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a61fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 6436fab..87b36da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -389,6 +389,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * statistics.
  */
@@ -418,6 +422,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -451,7 +456,8 @@ public class DecommissionManager {
   iterkey).iterator();
   final LinkedList toRemove = new LinkedList<>();
 
-  while (it.hasNext() && !exceededNumBlocksPerCheck()) {
+  while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
+  .isRunning()) {
 numNodesChecked++;
 final Map.Entry
 entry = it.next();
@@ -577,7 +583,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int lowRedundancyInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientList == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, 
iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
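
Note: the core of this change is the second counter, numBlocksCheckedPerLock. Once the iterator is a private copy (after the full scan), the monitor releases the namesystem write lock every numBlocksPerCheck blocks and sleeps briefly, so a datanode with a very large block count no longer pins the write lock for the entire decommission check. A generic, hedged sketch of that yield pattern using a plain ReentrantReadWriteLock; the names here (scan, safeToYield, blocksPerLock) are illustrative, not the DecommissionManager's.

import java.util.Iterator;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockYieldSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final int blocksPerLock = 1000;   // analogous to numBlocksPerCheck
  private int checkedSinceLock = 0;         // analogous to numBlocksCheckedPerLock

  // 'safeToYield' models the "iterator is a private copy" condition: the real
  // code only yields once the full scan is done, otherwise releasing the lock
  // mid-scan of a shared iterator could cause ConcurrentModificationException.
  <T> void scan(Iterator<T> it, boolean safeToYield) {
    lock.writeLock().lock();
    try {
      while (it.hasNext()) {
        if (safeToYield && checkedSinceLock >= blocksPerLock) {
          lock.writeLock().unlock();
          try {
            Thread.sleep(0, 500);           // give queued writers a chance
          } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
            return;                         // lock is not held on this path
          }
          checkedSinceLock = 0;
          lock.writeLock().lock();
        }
        checkedSinceLock++;
        process(it.next());                 // check one block
      }
    } finally {
      if (lock.writeLock().isHeldByCurrentThread()) {
        lock.writeLock().unlock();
      }
    }
  }

  private <T> void process(T item) {
  }
}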





hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ecd3467f1 -> 2e153bc8a


HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.

(cherry picked from commit fdce515091f0a61ffd6c9ae464a68447dedf1124)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e153bc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e153bc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e153bc8

Branch: refs/heads/branch-2
Commit: 2e153bc8abc3fe2f50660136af32711c50e7e7e7
Parents: ecd3467f
Author: Andrew Wang 
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 11:42:03 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e153bc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {





hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3a9808e96 -> 7a5aaa789


HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.

(cherry picked from commit fdce515091f0a61ffd6c9ae464a68447dedf1124)
(cherry picked from commit 2e153bc8abc3fe2f50660136af32711c50e7e7e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a5aaa78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a5aaa78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a5aaa78

Branch: refs/heads/branch-2.8
Commit: 7a5aaa789d9160ac55f022aae64a0b437ddccf06
Parents: 3a9808e
Author: Andrew Wang 
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 11:42:23 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a5aaa78/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {





hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 008122b3c -> fdce51509


HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdce5150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdce5150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdce5150

Branch: refs/heads/trunk
Commit: fdce515091f0a61ffd6c9ae464a68447dedf1124
Parents: 008122b
Author: Andrew Wang 
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 11:41:37 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdce5150/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {
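
Note: the fix replaces point-in-time assertions with polling. The pending-deletion counters are only correct once the incremental block reports (IBRs) have arrived, so the test now waits for the expected count via GenericTestUtils.waitFor instead of asserting immediately. A small sketch of that polling idiom; the AtomicInteger is a stand-in for the NameNode gauge the real test reads after triggering block reports.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  static void waitForPendingDeletionBlocks(AtomicInteger pending, int expected)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // The real test also calls cluster.triggerBlockReports() here before
        // reading the NameNode's pendingDeletionBlocks count.
        return pending.get() == expected;
      }
    }, 100 /* check every ms */, 6000 /* give up after ms */);
  }
}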





hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

2016-10-13 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3892fd810 -> d3c446514


HADOOP-13024. Distcp with -delete feature on raw data not implemented. 
Contributed by Mavin Martin.

(cherry picked from commit 0a85d079838f532a13ca237300386d1b3bc1b178)
(cherry picked from commit ad69baf6a9139c0af81e9f72e41c1e3aeb119ebc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3c44651
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3c44651
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3c44651

Branch: refs/heads/branch-2.8
Commit: d3c4465147c0994fcd8d67adf47635a54f0b58d9
Parents: 3892fd8
Author: Jing Zhao 
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao 
Committed: Thu Oct 13 13:28:05 2016 -0700

--
 .../apache/hadoop/tools/DistCpConstants.java| 12 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +---
 .../hadoop/tools/util/DistCpTestUtils.java  | 32 --
 4 files changed, 56 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3c44651/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 525f4ce..a6eda53 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -141,9 +143,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+  DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = 
"/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3c44651/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
 List targets = new ArrayList(1);
 Path targetFinalPath = new 
Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
 targets.add(targetFinalPath);
-DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+Path resultNonePath = 
Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+
.toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+DistCpOptions options = new DistCpOptions(targets, resultNonePath);
 //
 // Set up options to be the same from the CopyListing.buildListing's 
perspective,
 // so to collect similar listings as when doing the copy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3c44651/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class 
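
Note: for the -delete path, CopyCommitter builds a dummy "no sources" copy listing rooted at /NONE and diffs it against the target. When the target sits under /.reserved/raw the dummy path must be under /.reserved/raw as well, otherwise raw.* xattr handling in the listing comparison would be inconsistent. A compact, hedged sketch of the selection done in the CopyCommitter hunk above:

import org.apache.hadoop.fs.Path;

public final class RawNonePathSketch {
  static final String RESERVED_RAW = "/.reserved/raw";
  static final Path NONE_PATH = new Path("/NONE");
  static final Path RAW_NONE_PATH = new Path(RESERVED_RAW + "/NONE");

  // Pick the raw variant of the dummy source path whenever the
  // (scheme-stripped) target lives under /.reserved/raw.
  static Path chooseNonePath(Path targetFinalPath) {
    Path bare = Path.getPathWithoutSchemeAndAuthority(targetFinalPath);
    return bare.toString().startsWith(RESERVED_RAW) ? RAW_NONE_PATH : NONE_PATH;
  }

  public static void main(String[] args) {
    System.out.println(chooseNonePath(new Path("hdfs://nn/.reserved/raw/data"))); // raw dummy path
    System.out.println(chooseNonePath(new Path("hdfs://nn/data")));               // plain dummy path
  }
}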

hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

2016-10-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ded91992a -> 3892fd810


Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. 
Contributed by Yiqun Lin."

This reverts commit 7a5aaa789d9160ac55f022aae64a0b437ddccf06.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3892fd81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3892fd81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3892fd81

Branch: refs/heads/branch-2.8
Commit: 3892fd810b637128095b51ff90521171bf9a3fd3
Parents: ded9199
Author: Andrew Wang 
Authored: Thu Oct 13 13:23:01 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 13:23:01 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3892fd81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
-
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
-waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-  throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks()
-  == numBlocks) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
 return true;
   }
 } catch (Exception e) {
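
A note for readers skimming the diff above: waitForBlocksToDelete() is the standard GenericTestUtils.waitFor polling idiom with the expected pending-deletion count fixed at zero. Below is a minimal, self-contained sketch of that idiom; the counter and background thread are stand-ins invented for illustration, and only GenericTestUtils.waitFor with its (check, intervalMillis, timeoutMillis) signature is taken from Hadoop's test utilities.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForConditionSketch {
  public static void main(String[] args)
      throws TimeoutException, InterruptedException {
    final AtomicInteger pending = new AtomicInteger(3);

    // A background task drains the counter, standing in for the NameNode
    // working through its pending-deletion queue.
    new Thread(new Runnable() {
      @Override
      public void run() {
        while (pending.get() > 0) {
          pending.decrementAndGet();
          try {
            Thread.sleep(100);
          } catch (InterruptedException e) {
            return;
          }
        }
      }
    }).start();

    // Poll every 100 ms and give up after 10 s, the same shape as the
    // renamed helper, which polls getPendingDeletionBlocks() until it is 0.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return pending.get() == 0;
      }
    }, 100, 10000);
    System.out.println("condition reached");
  }
}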


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

2016-10-13 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 dc78e0e7f -> ad69baf6a


HADOOP-13024. Distcp with -delete feature on raw data not implemented. 
Contributed by Mavin Martin.

(cherry picked from commit 0a85d079838f532a13ca237300386d1b3bc1b178)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad69baf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad69baf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad69baf6

Branch: refs/heads/branch-2
Commit: ad69baf6a9139c0af81e9f72e41c1e3aeb119ebc
Parents: dc78e0e
Author: Jing Zhao 
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao 
Committed: Thu Oct 13 13:26:02 2016 -0700

--
 .../apache/hadoop/tools/DistCpConstants.java| 12 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +---
 .../hadoop/tools/util/DistCpTestUtils.java  | 32 --
 4 files changed, 56 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad69baf6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 525f4ce..a6eda53 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -141,9 +143,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+  DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = 
"/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad69baf6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
 List<Path> targets = new ArrayList<Path>(1);
 Path targetFinalPath = new 
Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
 targets.add(targetFinalPath);
-DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+Path resultNonePath = 
Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+
.toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+DistCpOptions options = new DistCpOptions(targets, resultNonePath);
 //
 // Set up options to be the same from the CopyListing.buildListing's 
perspective,
 // so to collect similar listings as when doing the copy
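
The heart of the CopyCommitter change is the choice of placeholder path for the delete listing: when the copy target lives under /.reserved/raw, the /NONE placeholder must sit under /.reserved/raw as well so raw.* xattrs are handled consistently. A standalone sketch of that selection, using local constants that mirror the ones added to DistCpConstants (the class and constant names here are illustrative, not the committed ones), could look like this:

import org.apache.hadoop.fs.Path;

public class RawNonePathSelection {
  // Local mirrors of the constants this commit adds to DistCpConstants.
  static final String RESERVED_RAW = "/.reserved/raw";
  static final Path NONE_PATH = new Path("/NONE");
  static final Path RAW_NONE_PATH = new Path(RESERVED_RAW + "/NONE");

  // Pick the placeholder listing path that matches the target's raw-ness.
  static Path chooseNonePath(Path targetFinalPath) {
    Path stripped = Path.getPathWithoutSchemeAndAuthority(targetFinalPath);
    return stripped.toString().startsWith(RESERVED_RAW)
        ? RAW_NONE_PATH : NONE_PATH;
  }

  public static void main(String[] args) {
    // Prints /.reserved/raw/NONE for a raw target, /NONE otherwise.
    System.out.println(chooseNonePath(new Path("hdfs://nn:8020/.reserved/raw/dest")));
    System.out.println(chooseNonePath(new Path("hdfs://nn:8020/data/dest")));
  }
}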

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad69baf6/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class TestDistCpWithRawXAttrs {
 final String relDst = 

hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

2016-10-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c5a130370 -> dc78e0e7f


Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. 
Contributed by Yiqun Lin."

This reverts commit 2e153bc8abc3fe2f50660136af32711c50e7e7e7.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc78e0e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc78e0e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc78e0e7

Branch: refs/heads/branch-2
Commit: dc78e0e7fc11d21de47f5ddb81a259988c2113fc
Parents: c5a1303
Author: Andrew Wang 
Authored: Thu Oct 13 13:23:04 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 13:23:04 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc78e0e7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
-
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
-waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-  throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks()
-  == numBlocks) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
 return true;
   }
 } catch (Exception e) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

2016-10-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 332a61fd7 -> 8c721aa00


Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. 
Contributed by Yiqun Lin."

This reverts commit fdce515091f0a61ffd6c9ae464a68447dedf1124.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c721aa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c721aa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c721aa0

Branch: refs/heads/trunk
Commit: 8c721aa00a47a976959e3861ddd742f09db432fc
Parents: 332a61f
Author: Andrew Wang 
Authored: Thu Oct 13 13:23:12 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 13:23:28 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c721aa0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
-
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
-waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-  throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks()
-  == numBlocks) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
 return true;
   }
 } catch (Exception e) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5689. Update native services REST API to use agentless docker provider. Contributed by Billie Rinaldi & Gour Saha

2016-10-13 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 42083da04 -> 1f40ba5bc


YARN-5689. Update native services REST API to use agentless docker provider. 
Contributed by Billie Rinaldi & Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f40ba5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f40ba5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f40ba5b

Branch: refs/heads/yarn-native-services
Commit: 1f40ba5bc7e4242fd43119961ca0d33fa237fa19
Parents: 42083da
Author: Jian He 
Authored: Thu Oct 13 11:34:58 2016 -0700
Committer: Jian He 
Committed: Thu Oct 13 11:34:58 2016 -0700

--
 .../api/impl/ApplicationApiService.java | 251 ++-
 .../yarn/services/utils/RestApiConstants.java   |   3 -
 .../api/impl/TestApplicationApiService.java |   6 +-
 3 files changed, 79 insertions(+), 181 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f40ba5b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 9645696..0a62629 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -50,7 +50,6 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.SerializationUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -68,10 +67,12 @@ import 
org.apache.hadoop.yarn.services.resource.Configuration;
 import org.apache.hadoop.yarn.services.resource.Container;
 import org.apache.hadoop.yarn.services.resource.ContainerState;
 import org.apache.hadoop.yarn.services.resource.Resource;
+import org.apache.slider.api.OptionKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.StateValues;
 import org.apache.slider.client.SliderClient;
 import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.params.ActionCreateArgs;
 import org.apache.slider.common.params.ActionFlexArgs;
 import org.apache.slider.common.params.ActionFreezeArgs;
@@ -88,12 +89,11 @@ import org.apache.slider.core.exceptions.NotFoundException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
+import org.apache.slider.providers.docker.DockerKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonArray;
 import com.google.gson.JsonElement;
 import com.google.gson.JsonNull;
 import com.google.gson.JsonObject;
@@ -211,7 +211,8 @@ public class ApplicationApiService implements 
ApplicationApi {
   application.setConfiguration(new Configuration());
 }
 addPropertyToConfiguration(application.getConfiguration(),
-PROPERTY_COMPONENT_TYPE, COMPONENT_TYPE_EXTERNAL);
+SliderKeys.COMPONENT_TYPE_KEY,
+SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
   }
   // resource
   validateApplicationResource(application.getResource(), null, application
@@ -249,7 +250,8 @@ public class ApplicationApiService implements 
ApplicationApi {
 comp.setConfiguration(new Configuration());
   }
   addPropertyToConfiguration(comp.getConfiguration(),
-  PROPERTY_COMPONENT_TYPE, COMPONENT_TYPE_EXTERNAL);
+  SliderKeys.COMPONENT_TYPE_KEY,
+  SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
   compNameArtifactIdMap.put(comp.getName(), 
comp.getArtifact().getId());
   comp.setName(comp.getArtifact().getId());
 }
@@ -339,9 +341,9 @@ public class ApplicationApiService implements 
ApplicationApi {
 final ActionCreateArgs createArgs = new 

hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e153bc8a -> c5a130370


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit 332a61fd74fd2a9874319232c583ab5d2c53ff03)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5a13037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5a13037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5a13037

Branch: refs/heads/branch-2
Commit: c5a13037048eb1e3b5a500aeec0e2e953e7d509a
Parents: 2e153bc
Author: Kihwal Lee 
Authored: Thu Oct 13 14:55:22 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 14:55:22 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 34 
 1 file changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a13037/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 78b6a20..10e4c96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -388,17 +388,12 @@ public class DecommissionManager {
  */
 private final int numBlocksPerCheck;
 /**
-<<<<<<< HEAD
  * The maximum number of nodes to check per tick.
  */
 private final int numNodesPerCheck;
 /**
  * The maximum number of nodes to track in decomNodeBlocks. A value of 0
  * means no limit.
-=======
- * The maximum number of nodes to track in outOfServiceNodeBlocks.
- * A value of 0 means no limit.
->>>>>>> 9dcbdbd... HDFS-9392. Admins support for maintenance state. 
Contributed by Ming Ma.
  */
 private final int maxConcurrentTrackedNodes;
 /**
@@ -406,6 +401,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * testing.
  */
@@ -443,6 +442,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -478,7 +478,8 @@ public class DecommissionManager {
 
   while (it.hasNext()
   && !exceededNumBlocksPerCheck()
-  && !exceededNumNodesPerCheck()) {
+  && !exceededNumNodesPerCheck()
+  && namesystem.isRunning()) {
 numNodesChecked++;
 final Map.Entry
 entry = it.next();
@@ -608,7 +609,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int underReplicatedInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientlyReplicated == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, 
iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
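
The fix itself is the lock-yield pattern visible above: once numBlocksPerCheck blocks have been examined under the write lock, the scan drops the lock briefly so other namesystem operations can run, then re-takes it and resets the per-lock counter; this is only safe once the iterator is a copy rather than the DataNode's live list. A self-contained sketch of the same pattern, using a plain ReentrantReadWriteLock as a stand-in for the FSNamesystem lock (an assumption of this example), follows.

import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockYieldingScan {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final int itemsPerLockHold = 1000; // analogous to numBlocksPerCheck

  // Scan a copied work list, yielding the write lock every itemsPerLockHold
  // items so readers and other writers are not starved during a long scan.
  void scan(List<String> workListCopy) throws InterruptedException {
    int checkedSinceLock = 0;
    lock.writeLock().lock();
    try {
      for (String item : workListCopy) {
        if (checkedSinceLock >= itemsPerLockHold) {
          lock.writeLock().unlock();
          try {
            Thread.sleep(0, 500); // brief pause, mirroring the patch
          } finally {
            lock.writeLock().lock(); // always re-take before continuing
          }
          checkedSinceLock = 0;
        }
        checkedSinceLock++;
        process(item);
      }
    } finally {
      lock.writeLock().unlock();
    }
  }

  private void process(String item) {
    // placeholder for the per-block bookkeeping done in the real scan
  }
}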


-
To unsubscribe, 

hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

2016-10-13 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8c721aa00 -> 0a85d0798


HADOOP-13024. Distcp with -delete feature on raw data not implemented. 
Contributed by Mavin Martin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a85d079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a85d079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a85d079

Branch: refs/heads/trunk
Commit: 0a85d079838f532a13ca237300386d1b3bc1b178
Parents: 8c721aa
Author: Jing Zhao 
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao 
Committed: Thu Oct 13 13:24:54 2016 -0700

--
 .../apache/hadoop/tools/DistCpConstants.java| 12 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +---
 .../hadoop/tools/util/DistCpTestUtils.java  | 32 --
 4 files changed, 56 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 96f364c..6171aa9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -125,9 +127,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+  DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = 
"/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
 List<Path> targets = new ArrayList<Path>(1);
 Path targetFinalPath = new 
Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
 targets.add(targetFinalPath);
-DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+Path resultNonePath = 
Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+
.toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+DistCpOptions options = new DistCpOptions(targets, resultNonePath);
 //
 // Set up options to be the same from the CopyListing.buildListing's 
perspective,
 // so to collect similar listings as when doing the copy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class TestDistCpWithRawXAttrs {
 final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
 doTestPreserveRawXAttrs(relSrc, relDst, 

hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7a5aaa789 -> ded91992a


HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit c5a13037048eb1e3b5a500aeec0e2e953e7d509a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ded91992
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ded91992
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ded91992

Branch: refs/heads/branch-2.8
Commit: ded91992adc08c5ac5cff00abcb9f05c148d8daa
Parents: 7a5aaa7
Author: Kihwal Lee 
Authored: Thu Oct 13 14:57:44 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 14:57:44 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ded91992/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 073332b..be4771d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -355,6 +355,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * testing.
  */
@@ -392,6 +396,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -426,7 +431,8 @@ public class DecommissionManager {
 
   while (it.hasNext()
   && !exceededNumBlocksPerCheck()
-  && !exceededNumNodesPerCheck()) {
+  && !exceededNumNodesPerCheck()
+  && namesystem.isRunning()) {
 numNodesChecked++;
 final Map.Entry
 entry = it.next();
@@ -544,7 +550,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int underReplicatedInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientlyReplicated == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, 
iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/51] [abbrv] hadoop git commit: HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.

2016-10-13 Thread aengineer
HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3059b251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3059b251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3059b251

Branch: refs/heads/HDFS-7240
Commit: 3059b251d8f37456c5761ecaf73fe6c0c5a59067
Parents: be3cb10
Author: Wei-Chiu Chuang 
Authored: Fri Oct 7 10:17:50 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Oct 7 10:17:50 2016 -0700

--
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 2482 --
 1 file changed, 1152 insertions(+), 1330 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/51] [abbrv] hadoop git commit: HADOOP-13692. hadoop-aws should declare explicit dependency on Jackson 2 jars to prevent classpath conflicts. Contributed by Chris Nauroth.

2016-10-13 Thread aengineer
HADOOP-13692. hadoop-aws should declare explicit dependency on Jackson 2 jars 
to prevent classpath conflicts. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69620f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69620f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69620f95

Branch: refs/heads/HDFS-7240
Commit: 69620f955997250d1b543d86d4907ee50218152a
Parents: 3059b25
Author: Chris Nauroth 
Authored: Fri Oct 7 11:41:19 2016 -0700
Committer: Chris Nauroth 
Committed: Fri Oct 7 11:41:19 2016 -0700

--
 hadoop-tools/hadoop-aws/pom.xml | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69620f95/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 49b0379..1c1bb02 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -286,6 +286,18 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
     </dependency>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/51] [abbrv] hadoop git commit: HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.

2016-10-13 Thread aengineer
HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3565c9af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3565c9af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3565c9af

Branch: refs/heads/HDFS-7240
Commit: 3565c9af17ab05bf9e7f68b71b6c6850df772bb9
Parents: 69620f95
Author: Kihwal Lee 
Authored: Fri Oct 7 14:14:47 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 7 14:15:59 2016 -0500

--
 .../hdfs/server/namenode/FSDirDeleteOp.java | 63 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java   | 11 ++--
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 3 files changed, 38 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 21ee3ce..328ce79 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -55,7 +55,7 @@ class FSDirDeleteOp {
 FSNamesystem fsn = fsd.getFSNamesystem();
 fsd.writeLock();
 try {
-  if (deleteAllowed(iip, iip.getPath()) ) {
+  if (deleteAllowed(iip)) {
 List snapshottableDirs = new ArrayList<>();
 FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ class FSDirDeleteOp {
 FSDirectory fsd = fsn.getFSDirectory();
 FSPermissionChecker pc = fsd.getPermissionChecker();
 
-final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-src = iip.getPath();
-if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-  throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+if (FSDirectory.isExactReservedName(src)) {
+  throw new InvalidPathException(src);
 }
+
+final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
 if (fsd.isPermissionEnabled()) {
   fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
   FsAction.ALL, true);
 }
-if (recursive && fsd.isNonEmptyDirectory(iip)) {
-  checkProtectedDescendants(fsd, src);
+if (fsd.isNonEmptyDirectory(iip)) {
+  if (!recursive) {
+throw new PathIsNotEmptyDirectoryException(
+iip.getPath() + " is non empty");
+  }
+  checkProtectedDescendants(fsd, iip);
 }
 
-return deleteInternal(fsn, src, iip, logRetryCache);
+return deleteInternal(fsn, iip, logRetryCache);
   }
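
Behaviour preserved by the hunk above, seen from a client: a non-recursive delete of a non-empty directory is rejected. A small client-side sketch is below; the paths are placeholders, the filesystem defaults to whatever fs.defaultFS points at, and on HDFS the rejection surfaces as PathIsNotEmptyDirectoryException while other FileSystem implementations may raise a plain IOException.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NonRecursiveDeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path dir = new Path("/tmp/non-empty-dir");
    fs.mkdirs(dir);
    fs.create(new Path(dir, "child")).close();

    try {
      fs.delete(dir, false); // non-recursive delete of a non-empty directory
    } catch (IOException e) {
      // On HDFS this is a PathIsNotEmptyDirectoryException.
      System.err.println("refused: " + e.getMessage());
    }
    fs.delete(dir, true); // recursive delete succeeds
  }
}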
 
   /**
@@ -126,17 +130,14 @@ class FSDirDeleteOp {
* @param src a string representation of a path to an inode
* @param mtime the time the inode is removed
*/
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
   throws IOException {
 assert fsd.hasWriteLock();
 FSNamesystem fsn = fsd.getFSNamesystem();
 BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
 List removedINodes = new ChunkedArrayList<>();
 List removedUCFiles = new ChunkedArrayList<>();
-
-final INodesInPath iip = fsd.getINodesInPath4Write(
-FSDirectory.normalizePath(src), false);
-if (!deleteAllowed(iip, src)) {
+if (!deleteAllowed(iip)) {
   return;
 }
 List snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ class FSDirDeleteOp {
* 
* For small directory or file the deletion is done in one shot.
* @param fsn namespace
-   * @param src path name to be deleted
* @param iip the INodesInPath instance containing all the INodes for the 
path
* @param logRetryCache whether to record RPC ids in editlog for retry cache
*  rebuilding
@@ -170,15 +170,11 @@ class FSDirDeleteOp {
* @throws IOException
*/
   static BlocksMapUpdateInfo deleteInternal(
-  FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+  FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
   throws IOException {
 assert fsn.hasWriteLock();
 if (NameNode.stateChangeLog.isDebugEnabled()) {
-  NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-}
-
-if (FSDirectory.isExactReservedName(src)) {
-  throw new 

[47/51] [abbrv] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-13 Thread aengineer
HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HDFS-7240
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a61fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 6436fab..87b36da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -389,6 +389,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * statistics.
  */
@@ -418,6 +422,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -451,7 +456,8 @@ public class DecommissionManager {
   iterkey).iterator();
   final LinkedList toRemove = new LinkedList<>();
 
-  while (it.hasNext() && !exceededNumBlocksPerCheck()) {
+  while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
+  .isRunning()) {
 numNodesChecked++;
 final Map.Entry
 entry = it.next();
@@ -577,7 +583,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int lowRedundancyInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientList == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, 
iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/51] [abbrv] hadoop git commit: MAPREDUCE-6780. Add support for HDFS directory with erasure code policy to TeraGen and TeraSort. Contributed by Sammi Chen

2016-10-13 Thread aengineer
MAPREDUCE-6780. Add support for HDFS directory with erasure code policy to 
TeraGen and TeraSort. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bea004ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bea004ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bea004ea

Branch: refs/heads/HDFS-7240
Commit: bea004eaeb7ba33bf324ef3e7065cfdd614d8198
Parents: ec0b707
Author: Kai Zheng 
Authored: Sun Oct 9 15:33:26 2016 +0600
Committer: Kai Zheng 
Committed: Sun Oct 9 15:33:26 2016 +0600

--
 .../hadoop/examples/terasort/TeraGen.java   |  3 +++
 .../examples/terasort/TeraOutputFormat.java | 20 +---
 .../hadoop/examples/terasort/TeraSort.java  |  3 +++
 3 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 22fe344..7fbb22a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -246,6 +246,9 @@ public class TeraGen extends Configured implements Tool {
 
   private static void usage() throws IOException {
 System.err.println("teragen  ");
+System.err.println("If you want to generate data and store them as " +
+"erasure code striping file, just make sure that the parent dir " +
+"of  has erasure code policy set");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index fd3ea78..73c446d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.examples.terasort;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -40,6 +42,7 @@ import org.apache.hadoop.mapreduce.security.TokenCache;
  * An output format that writes the key and value appended together.
  */
 public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
+  private static final Log LOG = LogFactory.getLog(TeraOutputFormat.class);
   private OutputCommitter committer = null;
 
   /**
@@ -74,10 +77,22 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
   out.write(key.getBytes(), 0, key.getLength());
   out.write(value.getBytes(), 0, value.getLength());
 }
-
+
 public void close(TaskAttemptContext context) throws IOException {
   if (finalSync) {
-out.hsync();
+try {
+  out.hsync();
+} catch (UnsupportedOperationException e) {
+  /*
+   * Currently, hsync operation on striping file with erasure code
+   * policy is not supported yet. So this is a workaround to make
+   * teragen and terasort to support directory with striping files. In
+   * future, if the hsync operation is supported on striping file, this
+   * workaround should be removed.
+   */
+  LOG.info("Operation hsync is not supported so far on path with " +
+  "erasure code policy set");
+}
   }
   out.close();
 }
@@ -135,5 +150,4 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
 }
 return committer;
   }
-
 }
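
The close() change above treats hsync() as best-effort because striped (erasure-coded) files do not support it yet. A generic version of that guard, usable with any FSDataOutputStream, might look like the sketch below; the helper class and method names are made up for illustration.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;

final class SyncHelper {
  private SyncHelper() {
  }

  // Flush to durable storage where the stream supports it, and fall back
  // silently when the underlying file (for example an erasure-coded striped
  // file) does not implement hsync yet.
  static void hsyncIfSupported(FSDataOutputStream out) throws IOException {
    try {
      out.hsync();
    } catch (UnsupportedOperationException e) {
      // Striped/EC files may not support hsync; continue without it.
    }
  }
}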

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java

[01/51] [abbrv] hadoop git commit: HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.

2016-10-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ef84ac469 -> 841742cdd


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3059b251/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 4b7eebd..aa41e9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -57,8 +57,11 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.google.common.base.Supplier;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
@@ -74,7 +77,6 @@ import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
@@ -116,44 +118,49 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
 
 /**
- * A JUnit test for doing fsck
+ * A JUnit test for doing fsck.
  */
 public class TestFsck {
+  private static final Log LOG =
+  LogFactory.getLog(TestFsck.class.getName());
+
   static final String AUDITLOG_FILE =
   GenericTestUtils.getTempPath("TestFsck-audit.log");
   
   // Pattern for: 
   // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
-  static final Pattern fsckPattern = Pattern.compile(
+  static final Pattern FSCK_PATTERN = Pattern.compile(
   "allowed=.*?\\s" +
   "ugi=.*?\\s" + 
   "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
   "cmd=fsck\\ssrc=\\/\\sdst=null\\s" + 
   "perm=null\\s" + "proto=.*");
-  static final Pattern getfileinfoPattern = Pattern.compile(
+  static final Pattern GET_FILE_INFO_PATTERN = Pattern.compile(
   "allowed=.*?\\s" +
   "ugi=.*?\\s" + 
   "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
   "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" + 
   "perm=null\\s" + "proto=.*");
 
-  static final Pattern numMissingBlocksPattern = Pattern.compile(
+  static final Pattern NUM_MISSING_BLOCKS_PATTERN = Pattern.compile(
   ".*Missing blocks:\t\t([0123456789]*).*");
 
-  static final Pattern numCorruptBlocksPattern = Pattern.compile(
+  static final Pattern NUM_CORRUPT_BLOCKS_PATTERN = Pattern.compile(
   ".*Corrupt blocks:\t\t([0123456789]*).*");
   
   private static final String LINE_SEPARATOR =
-System.getProperty("line.separator");
+  System.getProperty("line.separator");
 
   static String runFsck(Configuration conf, int expectedErrCode, 
-boolean checkErrorCode,String... path)
+boolean checkErrorCode, String... path)
 throws Exception {
 ByteArrayOutputStream bStream = new ByteArrayOutputStream();
 PrintStream out = new PrintStream(bStream, true);
@@ -163,60 +170,72 @@ public class TestFsck {
   assertEquals(expectedErrCode, errCode);
 }
 GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.INFO);
-FSImage.LOG.info("OUTPUT = " + bStream.toString());
+LOG.info("OUTPUT = " + bStream.toString());
 return bStream.toString();
   }
 
-  /** do fsck */
+  private MiniDFSCluster cluster = null;
+  private Configuration conf = null;
+
+  @Before
+  public void setUp() throws Exception {
+conf = new Configuration();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+shutdownCluster();
+  }
+
+  private void shutdownCluster() throws Exception {
+if (cluster != null) {
+  cluster.shutdown();
+}
+  }
+
+  /** do fsck. */
   @Test
   public void testFsck() throws Exception {
 DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
 setNumFiles(20).build();
-MiniDFSCluster cluster = null;
 FileSystem fs = null;
-try {
-  Configuration conf = new HdfsConfiguration();
-  final long precision = 1L;
-  
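
The bulk of the refactor moves the MiniDFSCluster lifecycle out of each test body and into JUnit @Before/@After methods, as the hunk above begins to show. A minimal sketch of that lifecycle for an arbitrary HDFS test follows; the class name, file path and datanode count are illustrative, not taken from TestFsck.

import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestMiniClusterLifecycle {
  private Configuration conf;
  private MiniDFSCluster cluster;

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test
  public void testWriteAndRead() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    Path file = new Path("/lifecycle-demo");
    fs.create(file).close();
    assertTrue(fs.exists(file));
  }
}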

[18/51] [abbrv] hadoop git commit: HADOOP-13699. Configuration does not substitute multiple references to the same var.

2016-10-13 Thread aengineer
HADOOP-13699. Configuration does not substitute multiple references to the same 
var.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03060075
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03060075
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03060075

Branch: refs/heads/HDFS-7240
Commit: 03060075c53a2cecfbf5f60b6fc77afecf64ace5
Parents: 3441c74
Author: Andrew Wang 
Authored: Mon Oct 10 12:19:26 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 10 12:19:26 2016 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 23 ++-
 .../apache/hadoop/conf/TestConfiguration.java   | 24 
 2 files changed, 16 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03060075/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 1e8ed50..dbbc8ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -943,10 +943,15 @@ public class Configuration implements 
Iterable>,
*
* If var is unbounded the current state of expansion "prefix${var}suffix" is
* returned.
-   *
-   * If a cycle is detected: replacing var1 requires replacing var2 ... 
requires
-   * replacing var1, i.e., the cycle is shorter than
-   * {@link Configuration#MAX_SUBST} then the original expr is returned.
+   * 
+   * This function also detects self-referential substitutions, i.e.
+   * 
+   *   {@code
+   *   foo.bar = ${foo.bar}
+   *   }
+   * 
+   * If a cycle is detected then the original expr is returned. Loops
+   * involving multiple substitutions are not detected.
*
* @param expr the literal value of a config key
* @return null if expr is null, otherwise the value resulting from expanding
@@ -959,7 +964,6 @@ public class Configuration implements 
Iterable>,
   return null;
 }
 String eval = expr;
-Set evalSet = null;
 for(int s = 0; s < MAX_SUBST; s++) {
   final int[] varBounds = findSubVariable(eval);
   if (varBounds[SUB_START_IDX] == -1) {
@@ -1004,15 +1008,12 @@ public class Configuration implements 
Iterable>,
 return eval; // return literal ${var}: var is unbound
   }
 
-  // prevent recursive resolution
-  //
   final int dollar = varBounds[SUB_START_IDX] - "${".length();
   final int afterRightBrace = varBounds[SUB_END_IDX] + "}".length();
   final String refVar = eval.substring(dollar, afterRightBrace);
-  if (evalSet == null) {
-evalSet = new HashSet();
-  }
-  if (!evalSet.add(refVar)) {
+
+  // detect self-referential values
+  if (val.contains(refVar)) {
 return expr; // return original expression if there is a loop
   }
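
The practical effect of the change is that several references to the same variable are now all expanded, while a directly self-referential value is still returned unmodified instead of looping. A small sketch using only the public Configuration API (the property names are made up) illustrates both cases:

import org.apache.hadoop.conf.Configuration;

public class MultiReferenceExpansion {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("user.name", "hadoop_user");

    // Two references to the same variable; both expand after this change.
    conf.set("my.user.group", "${user.name} ${user.name}");
    System.out.println(conf.get("my.user.group")); // hadoop_user hadoop_user

    // A self-referential value is detected and returned as-is.
    conf.set("foo.bar", "${foo.bar}");
    System.out.println(conf.get("foo.bar"));       // ${foo.bar}
  }
}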
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03060075/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 917ccbc..17112f5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -168,6 +168,9 @@ public class TestConfiguration extends TestCase {
 declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", 
"/tmp/hadoop_user/hello.txt");
 // check that undefined variables are returned as-is
 declareProperty("my.failsexpand", "a${my.undefvar}b", "a${my.undefvar}b");
+// check that multiple variable references are resolved
+declareProperty("my.user.group", "${user.name} ${user.name}",
+"hadoop_user hadoop_user");
 endConfig();
 Path fileResource = new Path(CONFIG);
 mock.addResource(fileResource);
@@ -1508,7 +1511,7 @@ public class TestConfiguration extends TestCase {
 }
   }
 
-  public void testInvalidSubstitutation() {
+  public void testInvalidSubstitution() {
 final Configuration configuration = new Configuration(false);
 
 // 2-var loops
@@ -1522,25 +1525,6 @@ public 

[28/51] [abbrv] hadoop git commit: HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu

2016-10-13 Thread aengineer
HADOOP-13697. LogLevel#main should not throw exception if no arguments. 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fb392a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fb392a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fb392a5

Branch: refs/heads/HDFS-7240
Commit: 2fb392a587d288b628936ca6d18fabad04afc585
Parents: 809cfd2
Author: Mingliang Liu 
Authored: Fri Oct 7 14:05:40 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 10:57:08 2016 -0700

--
 .../src/main/java/org/apache/hadoop/log/LogLevel.java   | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fb392a5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 4fa839f..79eae12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -47,15 +47,17 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * Change log level in runtime.
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUsage: General options are:\n"
+  public static final String USAGES = "\nUsage: Command options are:\n"
   + "\t[-getlevel   [-protocol (http|https)]\n"
   + "\t[-setlevel"
   + "[-protocol (http|https)]\n";
@@ -67,7 +69,7 @@ public class LogLevel {
*/
   public static void main(String[] args) throws Exception {
 CLI cli = new CLI(new Configuration());
-System.exit(cli.run(args));
+System.exit(ToolRunner.run(cli, args));
   }
 
   /**
@@ -81,6 +83,7 @@ public class LogLevel {
 
   private static void printUsage() {
 System.err.println(USAGES);
+GenericOptionsParser.printGenericCommandUsage(System.err);
   }
 
   public static boolean isValidProtocol(String protocol) {
@@ -107,7 +110,7 @@ public class LogLevel {
 sendLogLevelRequest();
   } catch (HadoopIllegalArgumentException e) {
 printUsage();
-throw e;
+return -1;
   }
   return 0;
 }
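
The pattern the patch adopts, in a minimal sketch: run the CLI through ToolRunner so generic options (-D, -conf, ...) are handled, and let run() print usage and return a non-zero code instead of throwing on bad arguments. The ExampleCli class and its argument check are illustrative assumptions, not the real LogLevel code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ExampleCli extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    if (args.length < 2) {
      // Print usage and fail gracefully instead of throwing, so callers
      // see an exit code rather than a stack trace.
      System.err.println("Usage: ExampleCli <target> <level>");
      return -1;
    }
    System.out.println("would set log level " + args[1] + " on " + args[0]);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner parses the generic options before run() sees the remaining args.
    System.exit(ToolRunner.run(new Configuration(), new ExampleCli(), args));
  }
}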





[23/51] [abbrv] hadoop git commit: YARN-5057. Resourcemanager.security.TestDelegationTokenRenewer fails in trunk. Contributed by Jason Lowe.

2016-10-13 Thread aengineer
YARN-5057. Resourcemanager.security.TestDelegationTokenRenewer fails in trunk. 
Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0773ffd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0773ffd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0773ffd0

Branch: refs/heads/HDFS-7240
Commit: 0773ffd0f8383384f8cf8599476565f78aae70c9
Parents: 669d6f1
Author: Naganarasimha 
Authored: Mon Oct 10 18:04:47 2016 -0400
Committer: Naganarasimha 
Committed: Mon Oct 10 18:04:47 2016 -0400

--
 .../security/TestDelegationTokenRenewer.java| 24 
 1 file changed, 19 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0773ffd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 5dfee89..205188b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1148,17 +1148,21 @@ public class TestDelegationTokenRenewer {
 credentials, null, true, false, false, null, 0, null, false, null);
 MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
 rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
+DelegationTokenRenewer renewer =
+rm.getRMContext().getDelegationTokenRenewer();
+DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1);
+Assert.assertNotNull(dttr);
 
 // submit app2 with the same token, set cancelTokenWhenComplete to true;
 RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2,
 credentials, null, true, false, false, null, 0, null, true, null);
 MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
 rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
-MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
 Assert.assertTrue(rm.getRMContext().getDelegationTokenRenewer()
   .getAllTokens().containsKey(token1));
 
-MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
 // app2 completes, app1 is still running, check the token is not cancelled
 Assert.assertFalse(Renewer.cancelled);
   }
@@ -1224,7 +1228,7 @@ public class TestDelegationTokenRenewer {
 Assert.assertTrue(dttr.referringAppIds.contains(app2.getApplicationId()));
 Assert.assertFalse(Renewer.cancelled);
 
-MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
 // app2 completes, app1 is still running, check the token is not cancelled
 Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
 Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
@@ -1242,14 +1246,14 @@ public class TestDelegationTokenRenewer {
 Assert.assertFalse(dttr.isTimerCancelled());
 Assert.assertFalse(Renewer.cancelled);
 
-MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
 Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
 Assert.assertFalse(dttr.referringAppIds.contains(app1.getApplicationId()));
 Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
 Assert.assertFalse(dttr.isTimerCancelled());
 Assert.assertFalse(Renewer.cancelled);
 
-MockRM.finishAMAndVerifyAppState(app3, rm, nm1, am3);
+finishAMAndWaitForComplete(app3, rm, nm1, am3, dttr);
 Assert.assertFalse(renewer.getAllTokens().containsKey(token1));
 Assert.assertTrue(dttr.referringAppIds.isEmpty());
 Assert.assertTrue(dttr.isTimerCancelled());
@@ -1259,4 +1263,14 @@ public class TestDelegationTokenRenewer {
 Assert.assertFalse(renewer.getDelegationTokens().contains(token1));
   }
 
+  private void finishAMAndWaitForComplete(final 
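
The diff is cut off at the new helper's signature. A hypothetical sketch of such a helper, inferred only from the calls visible above, could wait until the renewer no longer associates the finished application with the token before the test asserts on token state; the waitFor intervals below are assumed values.

// Hypothetical reconstruction -- the actual body is truncated above.
private void finishAMAndWaitForComplete(final RMApp app, final MockRM rm,
    final MockNM nm, final MockAM am, final DelegationTokenToRenew dttr)
    throws Exception {
  MockRM.finishAMAndVerifyAppState(app, rm, nm, am);
  // Wait until the renewer has dissociated the finished app from the token,
  // so later assertions do not race against the app-finish event.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return !dttr.referringAppIds.contains(app.getApplicationId());
    }
  }, 10, 10000);   // assumed poll interval and timeout, in milliseconds
}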

[21/51] [abbrv] hadoop git commit: HDFS-10985. o.a.h.ha.TestZKFailoverController should not use fixed time sleep before assertions. Contributed by Mingliang Liu

2016-10-13 Thread aengineer
HDFS-10985. o.a.h.ha.TestZKFailoverController should not use fixed time sleep 
before assertions. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c874fa91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c874fa91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c874fa91

Branch: refs/heads/HDFS-7240
Commit: c874fa914dfbf07d1731f5e87398607366675879
Parents: b963818
Author: Mingliang Liu 
Authored: Fri Oct 7 17:03:08 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Oct 10 13:33:07 2016 -0700

--
 .../hadoop/ha/TestZKFailoverController.java | 34 
 1 file changed, 21 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c874fa91/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 164167c..846c8ae 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import java.security.NoSuchAlgorithmException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -441,12 +442,16 @@ public class TestZKFailoverController extends 
ClientBaseWithFixes {
 cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
 cluster.waitForActiveLockHolder(0);
 
-Thread.sleep(1); // allow to quiesce
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return cluster.getService(0).fenceCount == 0 &&
+cluster.getService(1).fenceCount == 0 &&
+cluster.getService(0).activeTransitionCount == 2 &&
+cluster.getService(1).activeTransitionCount == 1;
+  }
+}, 100, 60 * 1000);
 
-assertEquals(0, cluster.getService(0).fenceCount);
-assertEquals(0, cluster.getService(1).fenceCount);
-assertEquals(2, cluster.getService(0).activeTransitionCount);
-assertEquals(1, cluster.getService(1).activeTransitionCount);
   }
 
   @Test
@@ -590,14 +595,17 @@ public class TestZKFailoverController extends 
ClientBaseWithFixes {
 cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
 cluster.waitForActiveLockHolder(0);
 
-Thread.sleep(1); // allow to quiesce
-
-assertEquals(0, cluster.getService(0).fenceCount);
-assertEquals(0, cluster.getService(1).fenceCount);
-assertEquals(0, cluster.getService(2).fenceCount);
-assertEquals(2, cluster.getService(0).activeTransitionCount);
-assertEquals(1, cluster.getService(1).activeTransitionCount);
-assertEquals(1, cluster.getService(2).activeTransitionCount);
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return cluster.getService(0).fenceCount == 0 &&
+cluster.getService(1).fenceCount == 0 &&
+cluster.getService(2).fenceCount == 0 &&
+cluster.getService(0).activeTransitionCount == 2 &&
+cluster.getService(1).activeTransitionCount == 1 &&
+cluster.getService(2).activeTransitionCount == 1;
+  }
+}, 100, 60 * 1000);
   }
 
   private int runFC(DummyHAService target, String ... args) throws Exception {

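The same fix pattern, reduced to a self-contained sketch in plain Java (no Hadoop test utilities): poll a condition at a short interval until it holds or a deadline passes, rather than sleeping a fixed time and hoping the cluster has quiesced.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

final class WaitUtil {
  /** Poll condition every intervalMs until it is true or timeoutMs elapses. */
  static void waitFor(BooleanSupplier condition, long intervalMs, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);   // back off briefly, then re-check
    }
  }
}

// Usage mirroring the test change above (the counts are illustrative):
//   WaitUtil.waitFor(() -> svc0.fenceCount == 0 && svc0.activeTransitionCount == 2,
//       100, 60_000);
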




[25/51] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

2016-10-13 Thread aengineer
HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by 
java.io.File. (Virajith Jalaparti via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96b12662
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96b12662
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96b12662

Branch: refs/heads/HDFS-7240
Commit: 96b12662ea76e3ded4ef13944fc8df206cfb4613
Parents: 0773ffd
Author: Lei Xu 
Authored: Mon Oct 10 15:28:19 2016 -0700
Committer: Lei Xu 
Committed: Mon Oct 10 15:30:03 2016 -0700

--
 .../hadoop/hdfs/server/common/Storage.java  |  22 ++
 .../server/datanode/BlockPoolSliceStorage.java  |  20 +-
 .../hdfs/server/datanode/BlockScanner.java  |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  34 +-
 .../hdfs/server/datanode/DataStorage.java   |  34 +-
 .../hdfs/server/datanode/DirectoryScanner.java  | 320 +--
 .../hdfs/server/datanode/DiskBalancer.java  |  25 +-
 .../hdfs/server/datanode/LocalReplica.java  |   2 +-
 .../hdfs/server/datanode/ReplicaInfo.java   |   2 +-
 .../hdfs/server/datanode/StorageLocation.java   |  32 +-
 .../hdfs/server/datanode/VolumeScanner.java |  27 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   5 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  | 234 +-
 .../impl/FsDatasetAsyncDiskService.java |  40 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 136 
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 233 --
 .../fsdataset/impl/FsVolumeImplBuilder.java |  65 
 .../datanode/fsdataset/impl/FsVolumeList.java   |  44 +--
 .../impl/RamDiskAsyncLazyPersistService.java|  79 +++--
 .../fsdataset/impl/VolumeFailureInfo.java   |  13 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../TestNameNodePrunesMissingStorages.java  |  15 +-
 .../server/datanode/SimulatedFSDataset.java |  46 ++-
 .../hdfs/server/datanode/TestBlockScanner.java  |   3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  15 +-
 .../datanode/TestDataNodeVolumeFailure.java |  12 +-
 .../TestDataNodeVolumeFailureReporting.java |  10 +
 .../server/datanode/TestDirectoryScanner.java   |  76 +++--
 .../hdfs/server/datanode/TestDiskError.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |  10 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |  44 ++-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |   9 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |  69 ++--
 .../fsdataset/impl/TestFsVolumeList.java|  83 +++--
 .../TestDiskBalancerWithMockMover.java  |   4 +-
 35 files changed, 1062 insertions(+), 713 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 9218e9d..e55de35 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.ToolRunner;
@@ -269,11 +270,17 @@ public abstract class Storage extends StorageInfo {
 
 private String storageUuid = null;  // Storage directory identifier.
 
+private final StorageLocation location;
 public StorageDirectory(File dir) {
   // default dirType is null
   this(dir, null, false);
 }
 
+public StorageDirectory(StorageLocation location) {
+  // default dirType is null
+  this(location.getFile(), null, false, location);
+}
+
 public StorageDirectory(File dir, StorageDirType dirType) {
   this(dir, dirType, false);
 }
@@ -294,11 +301,22 @@ public abstract class Storage extends StorageInfo {
  *  disables locking on the storage directory, false enables 
locking
  */
 public StorageDirectory(File dir, StorageDirType dirType, boolean 
isShared) {
+  this(dir, dirType, isShared, null);
+}
+
+public StorageDirectory(File dir, 

[11/51] [abbrv] hadoop git commit: Merge branch 'trunk' into HADOOP-12756

2016-10-13 Thread aengineer
Merge branch 'trunk' into HADOOP-12756


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a57bba47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a57bba47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a57bba47

Branch: refs/heads/HDFS-7240
Commit: a57bba470b396c163baef7ac9447c063180ec15b
Parents: 26d5df3 6a38d11
Author: Kai Zheng 
Authored: Sun Oct 9 10:29:40 2016 +0800
Committer: Kai Zheng 
Committed: Sun Oct 9 10:29:40 2016 +0800

--
 .../IncludePublicAnnotationsJDiffDoclet.java|64 +
 .../util/RolloverSignerSecretProvider.java  | 2 +-
 .../util/TestZKSignerSecretProvider.java|   221 +-
 .../dev-support/findbugsExcludeFile.xml | 5 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml| 41149 ++---
 .../org/apache/hadoop/conf/ConfServlet.java |19 +-
 .../org/apache/hadoop/conf/Configuration.java   |   284 +-
 .../apache/hadoop/fs/DFCachingGetSpaceUsed.java |48 +
 .../src/main/java/org/apache/hadoop/fs/DU.java  | 8 +-
 .../apache/hadoop/fs/FileEncryptionInfo.java|21 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |13 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java | 6 +-
 .../apache/hadoop/fs/permission/AclEntry.java   |24 +-
 .../hadoop/fs/permission/AclEntryScope.java | 2 +-
 .../hadoop/fs/permission/AclEntryType.java  |23 +-
 .../apache/hadoop/fs/permission/AclStatus.java  | 2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java | 6 +-
 .../hadoop/fs/shell/CommandWithDestination.java | 5 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 2 +-
 .../java/org/apache/hadoop/io/BloomMapFile.java |11 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java | 9 +-
 .../main/java/org/apache/hadoop/io/MapFile.java |10 +-
 .../java/org/apache/hadoop/io/SequenceFile.java |16 +-
 .../apache/hadoop/io/compress/BZip2Codec.java   | 9 +-
 .../apache/hadoop/io/compress/DefaultCodec.java | 9 +-
 .../apache/hadoop/io/compress/GzipCodec.java| 9 +-
 .../hadoop/io/file/tfile/Compression.java   |14 +-
 .../org/apache/hadoop/ipc/ExternalCall.java |91 +
 .../main/java/org/apache/hadoop/ipc/Server.java |88 +-
 .../org/apache/hadoop/net/NetworkTopology.java  | 2 +-
 .../apache/hadoop/net/SocksSocketFactory.java   | 4 +-
 .../org/apache/hadoop/security/Credentials.java | 8 +-
 .../hadoop/security/KerberosAuthException.java  |   118 +
 .../hadoop/security/UGIExceptionMessages.java   |46 +
 .../hadoop/security/UserGroupInformation.java   |   105 +-
 .../org/apache/hadoop/security/token/Token.java |60 +-
 .../java/org/apache/hadoop/util/LineReader.java | 6 +-
 .../org/apache/hadoop/util/SysInfoWindows.java  |58 +-
 .../java/org/apache/hadoop/util/hash/Hash.java  | 6 +-
 .../src/main/resources/core-default.xml | 6 +-
 .../src/site/markdown/FileSystemShell.md| 3 +-
 .../src/site/markdown/filesystem/filesystem.md  |77 +-
 .../org/apache/hadoop/conf/TestConfServlet.java |   122 +-
 .../apache/hadoop/conf/TestConfiguration.java   |   140 +-
 .../apache/hadoop/fs/FileContextURIBase.java| 4 +-
 .../hadoop/fs/TestDFCachingGetSpaceUsed.java|75 +
 .../hadoop/fs/TestDelegationTokenRenewer.java   | 3 +-
 .../hadoop/fs/TestFileSystemInitialization.java |12 +-
 .../AbstractContractRootDirectoryTest.java  |34 +-
 .../fs/contract/AbstractFSContractTestBase.java | 2 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |48 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |85 +
 .../org/apache/hadoop/net/ServerSocketUtil.java |23 +
 .../security/TestUserGroupInformation.java  |33 +-
 .../apache/hadoop/util/TestSysInfoWindows.java  | 7 +-
 .../hadoop/crypto/key/kms/server/KMS.java   |76 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 2 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   |76 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  | 4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 9 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java|   146 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |30 +
 .../hdfs/client/CreateEncryptionZoneFlag.java   |70 +
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|   536 +
 .../apache/hadoop/hdfs/client/HdfsUtils.java|86 +
 .../apache/hadoop/hdfs/client/package-info.java |27 +
 .../server/datanode/DiskBalancerWorkItem.java   | 2 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java|88 +-
 .../hdfs/web/resources/AclPermissionParam.java  |23 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  | 1 -
 .../jdiff/Apache_Hadoop_HDFS_2.7.2.xml  | 21704 +
 .../src/contrib/bkjournal/README.txt|66 -
 

[08/51] [abbrv] hadoop git commit: HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory.

2016-10-13 Thread aengineer
HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get 
counted twice. Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a38d118
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a38d118
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a38d118

Branch: refs/heads/HDFS-7240
Commit: 6a38d118d86b7907009bcec34f1b788d076f1d1c
Parents: e57fa81
Author: Xiao Chen 
Authored: Fri Oct 7 17:30:30 2016 -0700
Committer: Xiao Chen 
Committed: Fri Oct 7 17:37:15 2016 -0700

--
 .../ContentSummaryComputationContext.java   |  94 -
 .../hadoop/hdfs/server/namenode/INode.java  |   1 +
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../hdfs/server/namenode/INodeReference.java|   2 +
 .../hdfs/server/namenode/INodeSymlink.java  |   1 +
 .../snapshot/DirectorySnapshottableFeature.java |   9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   1 +
 .../snapshot/TestRenameWithSnapshots.java   | 199 +++
 10 files changed, 307 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 6df9e75..4208b53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -21,6 +21,10 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import java.util.HashSet;
+import java.util.Set;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -35,6 +39,8 @@ public class ContentSummaryComputationContext {
   private long yieldCount = 0;
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
+  private Set<INode> includedNodes = new HashSet<>();
+  private Set<INode> deletedSnapshottedNodes = new HashSet<>();
 
   /**
* Constructor
@@ -51,8 +57,8 @@ public class ContentSummaryComputationContext {
 this.fsn = fsn;
 this.limitPerRun = limitPerRun;
 this.nextCountLimit = limitPerRun;
-this.counts = new ContentCounts.Builder().build();
-this.snapshotCounts = new ContentCounts.Builder().build();
+setCounts(new ContentCounts.Builder().build());
+setSnapshotCounts(new ContentCounts.Builder().build());
 this.sleepMilliSec = sleepMicroSec/1000;
 this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
@@ -82,6 +88,7 @@ public class ContentSummaryComputationContext {
 }
 
 // Have we reached the limit?
+ContentCounts counts = getCounts();
 long currentCount = counts.getFileCount() +
 counts.getSymlinkCount() +
 counts.getDirectoryCount() +
@@ -123,14 +130,22 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public ContentCounts getCounts() {
+  public synchronized ContentCounts getCounts() {
 return counts;
   }
 
+  private synchronized void setCounts(ContentCounts counts) {
+this.counts = counts;
+  }
+
   public ContentCounts getSnapshotCounts() {
 return snapshotCounts;
   }
 
+  private void setSnapshotCounts(ContentCounts snapshotCounts) {
+this.snapshotCounts = snapshotCounts;
+  }
+
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
 Preconditions.checkState((bsps != null || fsn != null),
 "BlockStoragePolicySuite must be either initialized or available via" +
@@ -138,4 +153,77 @@ public class ContentSummaryComputationContext {
 return (bsps != null) ? bsps:
 fsn.getBlockManager().getStoragePolicySuite();
   }
+
+  /**
+   * If the node is an INodeReference, resolves it to the actual inode.
+   * Snapshot diffs represent renamed / moved files as different
+   * INodeReferences, but the underlying INode it refers to is consistent.
+   *
+   * @param node
+   * @return The referred INode if there is one, else returns the input
+   * unmodified.
+   
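
The idea behind the new includedNodes set, as a standalone sketch: resolve each reference to its underlying node and count every node at most once, so a file reachable both through a snapshot diff and through its renamed location is not counted twice. The Node interface below is a stand-in, not the HDFS INode classes.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

class DedupCountExample {
  /** Stand-in for INode / INodeReference, not the HDFS classes. */
  interface Node {
    long length();
    Node resolve();   // a reference resolves to the node it points at
  }

  /** Sum lengths, counting each resolved node only once. */
  static long totalLength(List<Node> reachable) {
    Set<Node> included = new HashSet<>();
    long total = 0;
    for (Node n : reachable) {
      Node actual = n.resolve();
      if (included.add(actual)) {   // first time we have seen this node
        total += actual.length();
      }
    }
    return total;
  }
}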

[26/51] [abbrv] hadoop git commit: YARN-5551. Ignore file backed pages from memory computation when smaps is enabled. Contributed by Rajesh Balamohan

2016-10-13 Thread aengineer
YARN-5551. Ignore file backed pages from memory computation when smaps is 
enabled. Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecb51b85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecb51b85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecb51b85

Branch: refs/heads/HDFS-7240
Commit: ecb51b857ac7faceff981b2b6f22ea1af0d42ab1
Parents: 96b1266
Author: Jason Lowe 
Authored: Tue Oct 11 15:12:43 2016 +
Committer: Jason Lowe 
Committed: Tue Oct 11 15:12:43 2016 +

--
 .../yarn/util/ProcfsBasedProcessTree.java   | 26 ++-
 .../yarn/util/TestProcfsBasedProcessTree.java   | 46 ++--
 2 files changed, 39 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 80d49c3..29bc277 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -406,15 +406,14 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 continue;
   }
 
-  total +=
-  Math.min(info.sharedDirty, info.pss) + info.privateDirty
-  + info.privateClean;
+  // Account for anonymous pages to know the amount of
+  // memory reclaimable by killing the process
+  total += info.anonymous;
+
   if (LOG.isDebugEnabled()) {
 LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-+ ", SharedDirty : " + info.sharedDirty + ", PSS : "
-+ info.pss + ", Private_Dirty : " + info.privateDirty
-+ ", Private_Clean : " + info.privateClean + ", total : "
-+ (total * KB_TO_BYTES));
++ ", info : " + info.toString()
++ ", total : " + (total * KB_TO_BYTES));
   }
 }
   }
@@ -877,6 +876,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 private int sharedDirty;
 private int privateClean;
 private int privateDirty;
+private int anonymous;
 private int referenced;
 private String regionName;
 private String permission;
@@ -929,6 +929,10 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   return referenced;
 }
 
+public int getAnonymous() {
+  return anonymous;
+}
+
 public void setMemInfo(String key, String value) {
   MemInfo info = MemInfo.getMemInfoByName(key);
   int val = 0;
@@ -969,6 +973,9 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   case REFERENCED:
 referenced = val;
 break;
+  case ANONYMOUS:
+anonymous = val;
+break;
   default:
 break;
   }
@@ -999,10 +1006,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 .append(MemInfo.REFERENCED.name + ":" + this.getReferenced())
 .append(" kB\n");
   sb.append("\t")
-.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
-.append(" kB\n");
-  sb.append("\t")
-.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
+.append(MemInfo.ANONYMOUS.name + ":" + this.getAnonymous())
 .append(" kB\n");
   return sb.toString();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index fa4e8c8..841d333 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 

[27/51] [abbrv] hadoop git commit: HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding policy. (Andrew Wang via lei)

2016-10-13 Thread aengineer
HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding 
policy. (Andrew Wang via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/809cfd27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/809cfd27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/809cfd27

Branch: refs/heads/HDFS-7240
Commit: 809cfd27a30900d2c0e0e133574de49d0b4538cf
Parents: ecb51b8
Author: Lei Xu 
Authored: Tue Oct 11 10:04:46 2016 -0700
Committer: Lei Xu 
Committed: Tue Oct 11 10:04:46 2016 -0700

--
 .../org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/809cfd27/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 3798394..d112a48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -369,7 +369,7 @@ public interface HdfsServerConstants {
   String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
   "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
-  "raw.hdfs.erasurecoding.policy";
+  "system.hdfs.erasurecoding.policy";
 
   long BLOCK_GROUP_INDEX_MASK = 15;
   byte MAX_BLOCKS_IN_GROUP = 16;





[10/51] [abbrv] hadoop git commit: HADOOP-13701. AbstractContractRootDirectoryTest can fail when handling delete "/". Contributed by Genmao Yu

2016-10-13 Thread aengineer
HADOOP-13701. AbstractContractRootDirectoryTest can fail when handling delete 
"/". Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c31b5e61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c31b5e61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c31b5e61

Branch: refs/heads/HDFS-7240
Commit: c31b5e61b1f09949548116309218a2b3e9c0beda
Parents: a57bba4
Author: Kai Zheng 
Authored: Sat Oct 8 17:35:59 2016 +0600
Committer: Kai Zheng 
Committed: Sat Oct 8 17:35:59 2016 +0600

--
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  | 39 +++-
 1 file changed, 38 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c31b5e61/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 81e038d..3b266c8 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 
@@ -53,6 +54,7 @@ public class AliyunOSSFileSystem extends FileSystem {
   private static final Logger LOG =
   LoggerFactory.getLogger(AliyunOSSFileSystem.class);
   private URI uri;
+  private String bucket;
   private Path workingDir;
   private AliyunOSSFileSystemStore store;
   private int maxKeys;
@@ -124,11 +126,20 @@ public class AliyunOSSFileSystem extends FileSystem {
   private boolean innerDelete(FileStatus status, boolean recursive)
   throws IOException {
 Path f = status.getPath();
+String p = f.toUri().getPath();
+FileStatus[] statuses;
+// indicating root directory "/".
+if (p.equals("/")) {
+  statuses = listStatus(status.getPath());
+  boolean isEmptyDir = statuses.length <= 0;
+  return rejectRootDirectoryDelete(isEmptyDir, recursive);
+}
+
 String key = pathToKey(f);
 if (status.isDirectory()) {
   if (!recursive) {
-FileStatus[] statuses = listStatus(status.getPath());
 // Check whether it is an empty directory or not
+statuses = listStatus(status.getPath());
 if (statuses.length > 0) {
   throw new IOException("Cannot remove directory " + f +
   ": It is not empty!");
@@ -148,6 +159,31 @@ public class AliyunOSSFileSystem extends FileSystem {
 return true;
   }
 
+  /**
+   * Implements the specific logic to reject root directory deletion.
+   * The caller must return the result of this call, rather than
+   * attempt to continue with the delete operation: deleting root
+   * directories is never allowed. This method simply implements
+   * the policy of when to return an exit code versus raise an exception.
+   * @param isEmptyDir empty directory or not
+   * @param recursive recursive flag from command
+   * @return a return code for the operation
+   * @throws PathIOException if the operation was explicitly rejected.
+   */
+  private boolean rejectRootDirectoryDelete(boolean isEmptyDir,
+  boolean recursive) throws IOException {
+LOG.info("oss delete the root directory of bucket {}, recursive = {}", bucket, recursive);
+if (isEmptyDir) {
+  return true;
+}
+if (recursive) {
+  return false;
+} else {
+  // reject
+  throw new PathIOException(bucket, "Cannot delete root path");
+}
+  }
+
   private void createFakeDirectoryIfNecessary(Path f) throws IOException {
 String key = pathToKey(f);
 if (StringUtils.isNotEmpty(key) && !exists(f)) {
@@ -226,6 +262,7 @@ public class AliyunOSSFileSystem extends FileSystem {
   public void initialize(URI name, Configuration conf) throws IOException {
 super.initialize(name, conf);
 
+bucket = name.getHost();
 uri = java.net.URI.create(name.getScheme() + "://" + name.getAuthority());
 workingDir = new Path("/user",
 System.getProperty("user.name")).makeQualified(uri, null);
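
The rejection policy documented above boils down to a small decision table; here it is in isolation as a hedged sketch (a plain IOException stands in for PathIOException):

class RootDeletePolicyExample {
  /**
   * empty root            -> true  (nothing to delete, report success)
   * non-empty + recursive -> false (refuse without raising)
   * non-empty, plain      -> throw (non-recursive delete of a non-empty root)
   */
  static boolean rejectRootDirectoryDelete(boolean isEmptyDir, boolean recursive)
      throws java.io.IOException {
    if (isEmptyDir) {
      return true;
    }
    if (recursive) {
      return false;
    }
    throw new java.io.IOException("Cannot delete root path");
  }
}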





[46/51] [abbrv] hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-13 Thread aengineer
HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdce5150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdce5150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdce5150

Branch: refs/heads/HDFS-7240
Commit: fdce515091f0a61ffd6c9ae464a68447dedf1124
Parents: 008122b
Author: Andrew Wang 
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 11:41:37 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdce5150/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {





[34/51] [abbrv] hadoop git commit: HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not delete a key.

2016-10-13 Thread aengineer
HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not 
delete a key.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b84c4891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b84c4891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b84c4891

Branch: refs/heads/HDFS-7240
Commit: b84c4891f9eca8d56593e48e9df88be42e24220d
Parents: 3c9a010
Author: Xiao Chen 
Authored: Tue Oct 11 17:05:00 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 11 17:05:00 2016 -0700

--
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b84c4891/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 4d7d504..2ece71a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -202,7 +202,9 @@ Manage keys via the KeyProvider. For details on 
KeyProviders, see the [Transpare
 
 Providers frequently require that a password or other secret is supplied. If 
the provider requires a password and is unable to find one, it will use a 
default password and emit a warning message that the default password is being 
used. If the `-strict` flag is supplied, the warning message becomes an error 
message and the command returns immediately with an error status.
 
-NOTE: Some KeyProviders (e.g. 
org.apache.hadoop.crypto.key.JavaKeyStoreProvider) does not support uppercase 
key names.
+NOTE: Some KeyProviders (e.g. 
org.apache.hadoop.crypto.key.JavaKeyStoreProvider) do not support uppercase key 
names.
+
+NOTE: Some KeyProviders do not directly execute a key deletion (e.g. they may
perform a soft delete instead, or delay the actual deletion to prevent
mistakes). In these cases, one may encounter errors when creating or deleting a
key with the same name after deleting it. Please check the underlying
KeyProvider for details.
 
 ### `trace`
 





[39/51] [abbrv] hadoop git commit: HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and #getInstance signatures.

2016-10-13 Thread aengineer
HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and 
#getInstance signatures.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d739a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d739a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d739a3

Branch: refs/heads/HDFS-7240
Commit: 12d739a34ba868b3f7f5adf7f37a60d4aca9061b
Parents: 85cd06f
Author: Andrew Wang 
Authored: Wed Oct 12 15:19:52 2016 -0700
Committer: Andrew Wang 
Committed: Wed Oct 12 15:19:52 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/TrashPolicy.java| 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d739a3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index 157b9ab..2fe3fd1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -53,9 +53,8 @@ public abstract class TrashPolicy extends Configured {
* not assume trash always under /user/$USER due to HDFS encryption zone.
* @param conf the configuration to be used
* @param fs the filesystem to be used
-   * @throws IOException
*/
-  public void initialize(Configuration conf, FileSystem fs) throws IOException{
+  public void initialize(Configuration conf, FileSystem fs) {
 throw new UnsupportedOperationException();
   }
 
@@ -137,8 +136,7 @@ public abstract class TrashPolicy extends Configured {
* @param fs the file system to be used
* @return an instance of TrashPolicy
*/
-  public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
-  throws IOException {
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs) {
 Class<? extends TrashPolicy> trashClass = conf.getClass(
 "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
 TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
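
With the unthrown IOException removed, call sites no longer need a checked-exception handler around getInstance. A minimal sketch of a caller, using only the classes and the two-argument signature shown in this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.TrashPolicy;

class TrashPolicyUsage {
  static TrashPolicy policyFor(FileSystem fs) {
    Configuration conf = fs.getConf();
    // No try/catch is needed around getInstance any more: the signature
    // no longer declares an IOException that was never thrown.
    return TrashPolicy.getInstance(conf, fs);
  }
}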





[05/51] [abbrv] hadoop git commit: HADOOP-13627. Have an explicit KerberosAuthException for UGI to throw, text from public constants. Contributed by Xiao Chen.

2016-10-13 Thread aengineer
HADOOP-13627. Have an explicit KerberosAuthException for UGI to throw, text 
from public constants. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e853be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e853be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e853be6

Branch: refs/heads/HDFS-7240
Commit: 2e853be6577a5b98fd860e6d64f89ca6d160514a
Parents: 3565c9a
Author: Xiao Chen 
Authored: Fri Oct 7 13:46:27 2016 -0700
Committer: Xiao Chen 
Committed: Fri Oct 7 13:46:27 2016 -0700

--
 .../hadoop/security/KerberosAuthException.java  | 118 +++
 .../hadoop/security/UGIExceptionMessages.java   |  46 
 .../hadoop/security/UserGroupInformation.java   |  74 +++-
 3 files changed, 209 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
new file mode 100644
index 000..811c7c9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import static org.apache.hadoop.security.UGIExceptionMessages.*;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Thrown when {@link UserGroupInformation} failed with an unrecoverable error,
+ * such as failure in kerberos login/logout, invalid subject etc.
+ *
+ * Caller should not retry when catching this exception.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class KerberosAuthException extends IOException {
+  static final long serialVersionUID = 31L;
+
+  private String user;
+  private String principal;
+  private String keytabFile;
+  private String ticketCacheFile;
+  private String initialMessage;
+
+  public KerberosAuthException(String msg) {
+super(msg);
+  }
+
+  public KerberosAuthException(Throwable cause) {
+super(cause);
+  }
+
+  public KerberosAuthException(String initialMsg, Throwable cause) {
+this(cause);
+initialMessage = initialMsg;
+  }
+
+  public void setUser(final String u) {
+user = u;
+  }
+
+  public void setPrincipal(final String p) {
+principal = p;
+  }
+
+  public void setKeytabFile(final String k) {
+keytabFile = k;
+  }
+
+  public void setTicketCacheFile(final String t) {
+ticketCacheFile = t;
+  }
+
+  /** @return The initial message, or null if not set. */
+  public String getInitialMessage() {
+return initialMessage;
+  }
+
+  /** @return The keytab file path, or null if not set. */
+  public String getKeytabFile() {
+return keytabFile;
+  }
+
+  /** @return The principal, or null if not set. */
+  public String getPrincipal() {
+return principal;
+  }
+
+  /** @return The ticket cache file path, or null if not set. */
+  public String getTicketCacheFile() {
+return ticketCacheFile;
+  }
+
+  /** @return The user, or null if not set. */
+  public String getUser() {
+return user;
+  }
+
+  @Override
+  public String getMessage() {
+final StringBuilder sb = new StringBuilder();
+if (initialMessage != null) {
+  sb.append(initialMessage);
+}
+if (user != null) {
+  sb.append(FOR_USER + user);
+}
+if (principal != null) {
+  sb.append(FOR_PRINCIPAL + principal);
+}
+if (keytabFile != null) {
+  sb.append(FROM_KEYTAB + keytabFile);
+}
+if (ticketCacheFile != null) {
+  sb.append(USING_TICKET_CACHE_FILE+ 
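
A short, hypothetical sketch of how login code might populate and surface this exception; the doKerberosLogin helper and the message text are illustrative assumptions, not the actual UserGroupInformation changes (which are in the truncated part of this patch):

// Illustrative only: doKerberosLogin and the message text are assumptions.
static void loginFromKeytab(String principal, String keytabFile)
    throws KerberosAuthException {
  try {
    doKerberosLogin(principal, keytabFile);
  } catch (Exception cause) {
    KerberosAuthException kae =
        new KerberosAuthException("Login failure", cause);
    kae.setPrincipal(principal);     // setters shown in the class above
    kae.setKeytabFile(keytabFile);
    throw kae;                       // getMessage() appends the principal/keytab details
  }
}

static void doKerberosLogin(String principal, String keytab) throws Exception {
  // placeholder for the actual JAAS/Kerberos login
}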

[19/51] [abbrv] hadoop git commit: HADOOP-13669. KMS Server should log exceptions before throwing. Contributed by Suraj Acharya.

2016-10-13 Thread aengineer
HADOOP-13669. KMS Server should log exceptions before throwing. Contributed by 
Suraj Acharya.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65912e40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65912e40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65912e40

Branch: refs/heads/HDFS-7240
Commit: 65912e4027548868ebefd8ee36eb00fa889704a7
Parents: 0306007
Author: Xiao Chen 
Authored: Mon Oct 10 12:49:19 2016 -0700
Committer: Xiao Chen 
Committed: Mon Oct 10 12:51:12 2016 -0700

--
 .../hadoop/crypto/key/kms/server/KMS.java   | 711 ++-
 1 file changed, 392 insertions(+), 319 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65912e40/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index 371f3f5..d8755ec 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -104,89 +104,101 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   @SuppressWarnings("unchecked")
   public Response createKey(Map jsonKey) throws Exception {
-LOG.trace("Entering createKey Method.");
-KMSWebApp.getAdminCallsMeter().mark();
-UserGroupInformation user = HttpUserGroupInformation.get();
-final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
-KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
-assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
-String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
-final String material = (String) 
jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
-int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
- ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
-String description = (String)
-jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
-LOG.debug("Creating key with name {}, cipher being used{}, " +
-"length of key {}, description of key {}", name, cipher,
- length, description);
-Map attributes = (Map)
-jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
-if (material != null) {
-  assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
-  KMSOp.CREATE_KEY, name);
-}
-final KeyProvider.Options options = new KeyProvider.Options(
-KMSWebApp.getConfiguration());
-if (cipher != null) {
-  options.setCipher(cipher);
-}
-if (length != 0) {
-  options.setBitLength(length);
-}
-options.setDescription(description);
-options.setAttributes(attributes);
-
-KeyProvider.KeyVersion keyVersion = user.doAs(
-new PrivilegedExceptionAction() {
-  @Override
-  public KeyVersion run() throws Exception {
-KeyProvider.KeyVersion keyVersion = (material != null)
-  ? provider.createKey(name, Base64.decodeBase64(material), 
options)
-  : provider.createKey(name, options);
-provider.flush();
-return keyVersion;
+try{
+  LOG.trace("Entering createKey Method.");
+  KMSWebApp.getAdminCallsMeter().mark();
+  UserGroupInformation user = HttpUserGroupInformation.get();
+  final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
+  KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
+  assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
+  String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
+  final String material;
+  material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
+  int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
+   ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
+  String description = (String)
+  jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
+  LOG.debug("Creating key with name {}, cipher being used{}, " +
+  "length of key {}, description of key {}", name, cipher,
+   length, description);
+  Map attributes = (Map)
+  jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
+  if (material != null) {
+assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+KMSOp.CREATE_KEY, name);
+  }
+  final KeyProvider.Options options 
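
The structural change being applied across the KMS methods, reduced to a schematic: wrap the handler body in a try block, log the exception server-side with some context, and rethrow so the client still receives the original error. The class and method below are a sketch under that assumption, not the exact KMS code.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LogBeforeThrowExample {
  private static final Logger LOG = LoggerFactory.getLogger(LogBeforeThrowExample.class);

  String handleRequest(String keyName) throws Exception {
    try {
      LOG.trace("Entering handleRequest.");
      String result = doWork(keyName);   // stand-in for the real handler body
      LOG.trace("Exiting handleRequest.");
      return result;
    } catch (Exception e) {
      // Log server-side before rethrowing, so the stack trace is preserved
      // even when the client only receives a translated error response.
      LOG.debug("Exception in handleRequest for key {}.", keyName, e);
      throw e;
    }
  }

  private String doWork(String keyName) { return "ok:" + keyName; }
}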

[48/51] [abbrv] hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

2016-10-13 Thread aengineer
Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. 
Contributed by Yiqun Lin."

This reverts commit fdce515091f0a61ffd6c9ae464a68447dedf1124.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c721aa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c721aa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c721aa0

Branch: refs/heads/HDFS-7240
Commit: 8c721aa00a47a976959e3861ddd742f09db432fc
Parents: 332a61f
Author: Andrew Wang 
Authored: Thu Oct 13 13:23:12 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 13:23:28 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c721aa0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
-
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
-waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-  throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
 GenericTestUtils.waitFor(new Supplier() {
 
   @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks()
-  == numBlocks) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
 return true;
   }
 } catch (Exception e) {
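
The waitForBlocksToDelete() helper restored above polls the pending-deletion
count with GenericTestUtils.waitFor instead of sleeping for a fixed interval.
A minimal sketch of that polling idiom, assuming Hadoop's test utilities and
Guava are on the classpath; isConditionMet() is a hypothetical stand-in for a
real check such as the pending-deletion count reaching zero:

    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForExample {
      // Poll the condition every 100 ms, give up (and throw) after 10 s.
      static void waitForCondition() throws Exception {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return isConditionMet();
          }
        }, 100, 10000);
      }

      private static boolean isConditionMet() {
        return true; // placeholder for the real condition
      }
    }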





[45/51] [abbrv] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

2016-10-13 Thread aengineer
HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception 
stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/008122b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/008122b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/008122b3

Branch: refs/heads/HDFS-7240
Commit: 008122b3c927767ac96dc876124bc591e10c9df4
Parents: 9097e2e
Author: Arpit Agarwal 
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 13 11:37:03 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/008122b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements 
Closeable, GetSpaceUsed {
   // update the used variable
   spaceUsed.refresh();
 } catch (InterruptedException e) {
-  LOG.warn("Thread Interrupted waiting to refresh disk information", 
e);
+  LOG.warn("Thread Interrupted waiting to refresh disk information: "
+  + e.getMessage());
   Thread.currentThread().interrupt();
 }
   }
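
The change above logs only the interrupt's message and restores the thread's
interrupt status instead of printing a full stack trace for an expected
shutdown signal. A minimal, self-contained sketch of that idiom; refresh() is
a hypothetical placeholder for the real blocking work:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class InterruptLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(InterruptLoggingExample.class);

      void refreshLoop() {
        while (!Thread.currentThread().isInterrupted()) {
          try {
            refresh();
          } catch (InterruptedException e) {
            // An interrupt here usually means shutdown, so the message is
            // enough; a stack trace would only add noise to the logs.
            LOG.warn("Thread interrupted while refreshing: " + e.getMessage());
            Thread.currentThread().interrupt(); // preserve interrupt status
          }
        }
      }

      private void refresh() throws InterruptedException {
        Thread.sleep(1000); // placeholder for the real refresh work
      }
    }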





[32/51] [abbrv] hadoop git commit: HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.

2016-10-13 Thread aengineer
HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61f0490a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61f0490a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61f0490a

Branch: refs/heads/HDFS-7240
Commit: 61f0490a73085bbaf6639d9234277e59dc1145db
Parents: dacd3ec
Author: Xiaoyu Yao 
Authored: Tue Oct 11 15:55:02 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 11 15:55:02 2016 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  |  6 ++
 .../server/namenode/top/metrics/TopMetrics.java | 67 ++--
 .../server/namenode/metrics/TestTopMetrics.java | 63 ++
 3 files changed, 129 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2471dc8..b9b02ef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -89,6 +89,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
+import static 
org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
@@ -989,6 +990,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 // Add audit logger to calculate top users
 if (topConf.isEnabled) {
   topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
+  if (DefaultMetricsSystem.instance().getSource(
+  TOPMETRICS_METRICS_SOURCE_NAME) == null) {
+
DefaultMetricsSystem.instance().register(TOPMETRICS_METRICS_SOURCE_NAME,
+"Top N operations by user", topMetrics);
+  }
   auditLoggers.add(new TopAuditLogger(topMetrics));
 }
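
The guard above registers the TopMetrics source only when no source with that
name is already present in the metrics system. A minimal sketch of registering
a custom MetricsSource the same way; the source name and gauge are illustrative,
not the actual nntop metrics:

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.Interns;

    public class ExampleMetricsSource implements MetricsSource {
      static final String SOURCE_NAME = "ExampleSource"; // illustrative name

      @Override
      public void getMetrics(MetricsCollector collector, boolean all) {
        collector.addRecord(SOURCE_NAME)
            .addGauge(Interns.info("exampleGauge", "An illustrative gauge"), 42L);
      }

      public static void registerOnce(ExampleMetricsSource source) {
        // Register only if no source with this name exists yet.
        if (DefaultMetricsSystem.instance().getSource(SOURCE_NAME) == null) {
          DefaultMetricsSystem.instance().register(
              SOURCE_NAME, "Example metrics source", source);
        }
      }
    }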
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index ab55392..2719c88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -17,24 +17,32 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.metrics;
 
-import java.net.InetAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
+import 
org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import 
org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.InetAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
 import static 
org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 
 /**
@@ -58,8 +66,11 @@ import static 

[29/51] [abbrv] hadoop git commit: HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop is not found. Contributed by Wei-Chiu Chuang.

2016-10-13 Thread aengineer
HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop 
is not found. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b32b142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b32b142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b32b142

Branch: refs/heads/HDFS-7240
Commit: 4b32b1420d98ea23460d05ae94f2698109b3d6f7
Parents: 2fb392a
Author: Wei-Chiu Chuang 
Authored: Tue Oct 11 13:21:33 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Oct 11 13:21:33 2016 -0700

--
 .../apache/hadoop/io/compress/SnappyCodec.java  | 30 +++-
 1 file changed, 16 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b32b142/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 2a9c5d0..20a4cd6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,20 +60,22 @@ public class SnappyCodec implements Configurable, 
CompressionCodec, DirectDecomp
* Are the native snappy libraries loaded & initialized?
*/
   public static void checkNativeCodeLoaded() {
-  if (!NativeCodeLoader.isNativeCodeLoaded() ||
-  !NativeCodeLoader.buildSupportsSnappy()) {
-throw new RuntimeException("native snappy library not available: " +
-"this version of libhadoop was built without " +
-"snappy support.");
-  }
-  if (!SnappyCompressor.isNativeCodeLoaded()) {
-throw new RuntimeException("native snappy library not available: " +
-"SnappyCompressor has not been loaded.");
-  }
-  if (!SnappyDecompressor.isNativeCodeLoaded()) {
-throw new RuntimeException("native snappy library not available: " +
-"SnappyDecompressor has not been loaded.");
-  }
+if (!NativeCodeLoader.buildSupportsSnappy()) {
+  throw new RuntimeException("native snappy library not available: " +
+  "this version of libhadoop was built without " +
+  "snappy support.");
+}
+if (!NativeCodeLoader.isNativeCodeLoaded()) {
+  throw new RuntimeException("Failed to load libhadoop.");
+}
+if (!SnappyCompressor.isNativeCodeLoaded()) {
+  throw new RuntimeException("native snappy library not available: " +
+  "SnappyCompressor has not been loaded.");
+}
+if (!SnappyDecompressor.isNativeCodeLoaded()) {
+  throw new RuntimeException("native snappy library not available: " +
+  "SnappyDecompressor has not been loaded.");
+}
   }
   
   public static boolean isNativeCodeLoaded() {
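
With the reordering above, a missing libhadoop and a libhadoop built without
snappy now fail with distinct messages. A minimal sketch of probing for native
snappy support before relying on the codec; the fallback branch is illustrative:

    import org.apache.hadoop.io.compress.SnappyCodec;

    public class SnappyProbe {
      public static void main(String[] args) {
        try {
          // Throws a RuntimeException whose message now distinguishes a
          // missing libhadoop from one built without snappy support.
          SnappyCodec.checkNativeCodeLoaded();
          System.out.println("Native snappy is available.");
        } catch (RuntimeException e) {
          // Illustrative fallback: report and continue without native snappy.
          System.err.println("Native snappy unavailable: " + e.getMessage());
        }
      }
    }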





[51/51] [abbrv] hadoop git commit: Fixing merge conflicts due to HDFS-10637

2016-10-13 Thread aengineer
Fixing merge conflicts due to HDFS-10637


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/841742cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/841742cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/841742cd

Branch: refs/heads/HDFS-7240
Commit: 841742cdd5717febfd58670e6f2d0ccc303a7eee
Parents: 7d70e57
Author: Anu Engineer 
Authored: Thu Oct 13 15:29:52 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 15:29:52 2016 -0700

--
 .../ozone/container/common/impl/ContainerLocationManagerImpl.java  | 2 +-
 .../apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/841742cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
index deb35f2..6dbd593 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
@@ -71,7 +71,7 @@ public class ContainerLocationManagerImpl implements 
ContainerLocationManager {
 references = this.dataset.getFsVolumeReferences();
 for (int ndx = 0; ndx < references.size(); ndx++) {
   FsVolumeSpi vol = references.get(ndx);
-  pathList.add(Paths.get(vol.getBasePath()));
+  pathList.add(Paths.get(vol.getBaseURI().getPath()));
 }
 references.close();
 volumePaths = pathList.toArray(new Path[pathList.size()]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/841742cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index fc3e0cf..b8fd696 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -160,7 +160,7 @@ public class OzoneContainer {
 references = dataset.getFsVolumeReferences();
 for (int ndx = 0; ndx < references.size(); ndx++) {
   FsVolumeSpi vol = references.get(ndx);
-  pathList.add(Paths.get(vol.getBasePath()));
+  pathList.add(Paths.get(vol.getBaseURI().getPath()));
 }
 references.close();
   }





[36/51] [abbrv] hadoop git commit: YARN-4464. Lower the default max applications stored in the RM and store. (Daniel Templeton via kasha)

2016-10-13 Thread aengineer
YARN-4464. Lower the default max applications stored in the RM and store. 
(Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6378845f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6378845f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6378845f

Branch: refs/heads/HDFS-7240
Commit: 6378845f9ef789c3fda862c43bcd498aa3f35068
Parents: 7ba7092
Author: Karthik Kambatla 
Authored: Tue Oct 11 21:41:58 2016 -0700
Committer: Karthik Kambatla 
Committed: Tue Oct 11 21:42:08 2016 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 20 
 .../src/main/resources/yarn-default.xml |  4 ++--
 .../server/resourcemanager/RMAppManager.java|  2 +-
 3 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4d43357..3bd0dcc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -719,17 +719,29 @@ public class YarnConfiguration extends Configuration {
   + "leveldb-state-store.compaction-interval-secs";
   public static final long DEFAULT_RM_LEVELDB_COMPACTION_INTERVAL_SECS = 3600;
 
-  /** The maximum number of completed applications RM keeps. */ 
+  /**
+   * The maximum number of completed applications RM keeps. By default equals
+   * to {@link #DEFAULT_RM_MAX_COMPLETED_APPLICATIONS}.
+   */
   public static final String RM_MAX_COMPLETED_APPLICATIONS =
 RM_PREFIX + "max-completed-applications";
-  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000;
+  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 1000;
 
   /**
-   * The maximum number of completed applications RM state store keeps, by
-   * default equals to DEFAULT_RM_MAX_COMPLETED_APPLICATIONS
+   * The maximum number of completed applications RM state store keeps. By
+   * default equals to value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
*/
   public static final String RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
   RM_PREFIX + "state-store.max-completed-applications";
+  /**
+   * The default value for
+   * {@code yarn.resourcemanager.state-store.max-completed-applications}.
+   * @deprecated This default value is ignored and will be removed in a future
+   * release. The default value of
+   * {@code yarn.resourcemanager.state-store.max-completed-applications} is the
+   * value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
+   */
+  @Deprecated
   public static final int DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
   DEFAULT_RM_MAX_COMPLETED_APPLICATIONS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 524afec..f37c689 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -417,7 +417,7 @@
 the applications remembered in RM memory.
 Any values larger than ${yarn.resourcemanager.max-completed-applications} 
will
 be reset to ${yarn.resourcemanager.max-completed-applications}.
-Note that this value impacts the RM recovery performance.Typically,
+Note that this value impacts the RM recovery performance. Typically,
 a smaller value indicates better performance on RM recovery.
 
 yarn.resourcemanager.state-store.max-completed-applications
@@ -687,7 +687,7 @@
   
 The maximum number of completed applications RM keeps. 

 yarn.resourcemanager.max-completed-applications
-10000
+1000
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
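
With the default lowered to 1000, operators who want to keep more finished
applications must raise the limits explicitly. A minimal sketch of overriding
both keys programmatically, with illustrative values; the same keys can also be
set in yarn-site.xml:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class CompletedAppLimits {
      public static YarnConfiguration withLargerLimits() {
        YarnConfiguration conf = new YarnConfiguration();
        // Keep more finished applications in RM memory (illustrative value).
        conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 10000);
        // Keep the state store in step; values larger than the in-memory
        // limit are reset to it.
        conf.setInt(
            YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 10000);
        return conf;
      }
    }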

[43/51] [abbrv] hadoop git commit: HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.

2016-10-13 Thread aengineer
HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. 
Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b371c563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b371c563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b371c563

Branch: refs/heads/HDFS-7240
Commit: b371c56365c14bbab0f5cdfffc0becaabfde8145
Parents: 1291254
Author: Anu Engineer 
Authored: Thu Oct 13 10:26:07 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 10:26:07 2016 -0700

--
 .../server/diskbalancer/TestDiskBalancer.java   | 44 +---
 1 file changed, 11 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b371c563/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index d911e74..9985210 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -44,7 +44,6 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -137,6 +136,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -144,6 +144,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
@@ -174,7 +175,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
-
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -182,9 +183,9 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap, cap})
 .build();
 
-
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
   sourceDiskIndex, conf, blockSize, blockCount);
@@ -221,6 +222,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -228,6 +230,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 
 try {
@@ -246,24 +249,6 @@ public class TestDiskBalancer {
   }
 
   /**
-   * Sets alll Disks capacity to size specified.
-   *
-   * @param cluster - DiskBalancerCluster
-   * @param size- new size of the disk
-   */
-  private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
- String diskType) {
-Preconditions.checkNotNull(cluster);
-for (DiskBalancerDataNode node : cluster.getNodes()) {
-  for (DiskBalancerVolume vol :
-  node.getVolumeSets().get(diskType).getVolumes()) {
-vol.setCapacity(size);
-  }
-  node.getVolumeSets().get(diskType).computeVolumeDataDensity();
-}
-  }
-
-  /**
* Helper class that allows us to create different kinds of MiniDFSClusters
* and populate data.
*/
@@ -274,6 +259,7 @@ public class TestDiskBalancer {
 private int fileLen;
 private int blockCount;
 private int diskCount;
+private long[] capacities;
 
 public ClusterBuilder 

[42/51] [abbrv] hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

2016-10-13 Thread aengineer
HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12912540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12912540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12912540

Branch: refs/heads/HDFS-7240
Commit: 129125404244f35ee63b8f0491a095371685e9ba
Parents: 9454dc5
Author: Brahma Reddy Battula 
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 13 22:05:00 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +--
 2 files changed, 51 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 32401dc..a60f24b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -936,8 +936,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2207,7 +2206,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2218,8 +2217,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
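
By rethrowing instead of swallowing the cause and returning -1, the command now
lets the shell framework print the underlying reason for the failure. A minimal
sketch of the wrap-and-rethrow idiom with hypothetical names, independent of the
DFSAdmin internals:

    import java.io.IOException;

    public class WrapAndRethrowExample {
      // callDatanode() is a hypothetical RPC; on failure the caller sees both
      // the high-level context and the original exception as the cause.
      int getInfo() throws IOException {
        try {
          callDatanode();
          return 0;
        } catch (IOException ioe) {
          throw new IOException("Datanode unreachable. " + ioe, ioe);
        }
      }

      private void callDatanode() throws IOException {
        throw new IOException("connection refused"); // placeholder failure
      }
    }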

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
 namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
   @Test(timeout = 30000)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+final DFSAdmin dfsAdmin = new DFSAdmin(conf);
 
-  /* init reused vars */
-  List outs = null;
-  int ret;
-

[14/51] [abbrv] hadoop git commit: HADOOP-13641. Update UGI#spawnAutoRenewalThreadForUserCreds to reduce indentation. Contributed by Huafeng Wang

2016-10-13 Thread aengineer
HADOOP-13641. Update UGI#spawnAutoRenewalThreadForUserCreds to reduce 
indentation. Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d59b18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d59b18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d59b18d

Branch: refs/heads/HDFS-7240
Commit: 3d59b18d49d98a293ae14c5b89d515ef83cc4ff7
Parents: bea004e
Author: Kai Zheng 
Authored: Sun Oct 9 15:53:36 2016 +0600
Committer: Kai Zheng 
Committed: Sun Oct 9 15:53:36 2016 +0600

--
 .../hadoop/security/UserGroupInformation.java   | 98 ++--
 1 file changed, 49 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d59b18d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 329859d..e8711b0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -946,60 +946,60 @@ public class UserGroupInformation {
 
   /**Spawn a thread to do periodic renewals of kerberos credentials*/
   private void spawnAutoRenewalThreadForUserCreds() {
-if (isSecurityEnabled()) {
-  //spawn thread only if we have kerb credentials
-  if (user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS &&
-  !isKeytab) {
-Thread t = new Thread(new Runnable() {
-  
-  @Override
-  public void run() {
-String cmd = conf.get("hadoop.kerberos.kinit.command",
-  "kinit");
-KerberosTicket tgt = getTGT();
+if (!isSecurityEnabled()
+|| user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
+|| isKeytab) {
+  return;
+}
+
+//spawn thread only if we have kerb credentials
+Thread t = new Thread(new Runnable() {
+
+  @Override
+  public void run() {
+String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+KerberosTicket tgt = getTGT();
+if (tgt == null) {
+  return;
+}
+long nextRefresh = getRefreshTime(tgt);
+while (true) {
+  try {
+long now = Time.now();
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Current time is " + now);
+  LOG.debug("Next refresh is " + nextRefresh);
+}
+if (now < nextRefresh) {
+  Thread.sleep(nextRefresh - now);
+}
+Shell.execCommand(cmd, "-R");
+if (LOG.isDebugEnabled()) {
+  LOG.debug("renewed ticket");
+}
+reloginFromTicketCache();
+tgt = getTGT();
 if (tgt == null) {
+  LOG.warn("No TGT after renewal. Aborting renew thread for " +
+  getUserName());
   return;
 }
-long nextRefresh = getRefreshTime(tgt);
-while (true) {
-  try {
-long now = Time.now();
-if(LOG.isDebugEnabled()) {
-  LOG.debug("Current time is " + now);
-  LOG.debug("Next refresh is " + nextRefresh);
-}
-if (now < nextRefresh) {
-  Thread.sleep(nextRefresh - now);
-}
-Shell.execCommand(cmd, "-R");
-if(LOG.isDebugEnabled()) {
-  LOG.debug("renewed ticket");
-}
-reloginFromTicketCache();
-tgt = getTGT();
-if (tgt == null) {
-  LOG.warn("No TGT after renewal. Aborting renew thread for " +
-   getUserName());
-  return;
-}
-nextRefresh = Math.max(getRefreshTime(tgt),
-   now + kerberosMinSecondsBeforeRelogin);
-  } catch (InterruptedException ie) {
-LOG.warn("Terminating renewal thread");
-return;
-  } catch (IOException ie) {
-LOG.warn("Exception encountered while running the" +
-" renewal command. Aborting renew thread. " + ie);
-return;
-  }
-}
+nextRefresh = Math.max(getRefreshTime(tgt),
+  now + 
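
The rewrite replaces nested ifs with early returns so the renewal loop stays at
one indentation level. A minimal sketch of the same guard-clause idiom on
hypothetical conditions:

    public class GuardClauseExample {
      // Before: nested ifs wrap the whole body.
      void renewNested(boolean securityEnabled, boolean hasKerberosCreds) {
        if (securityEnabled) {
          if (hasKerberosCreds) {
            doRenew();
          }
        }
      }

      // After: bail out early and keep the interesting code unindented.
      void renewFlat(boolean securityEnabled, boolean hasKerberosCreds) {
        if (!securityEnabled || !hasKerberosCreds) {
          return;
        }
        doRenew();
      }

      private void doRenew() {
        // placeholder for the renewal work
      }
    }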

[17/51] [abbrv] hadoop git commit: HDFS-10972. Add unit test for HDFS command 'dfsadmin -getDatanodeInfo'. Contributed by Xiaobing Zhou

2016-10-13 Thread aengineer
HDFS-10972. Add unit test for HDFS command 'dfsadmin -getDatanodeInfo'. 
Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3441c746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3441c746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3441c746

Branch: refs/heads/HDFS-7240
Commit: 3441c746b5f35c46fca5a0f252c86c8357fe932e
Parents: cef61d5
Author: Mingliang Liu 
Authored: Mon Oct 10 11:33:37 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Oct 10 11:33:37 2016 -0700

--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 124 +--
 1 file changed, 113 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3441c746/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index e71c5cc..94ecb9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -30,12 +30,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -68,6 +70,10 @@ public class TestDFSAdmin {
   private DFSAdmin admin;
   private DataNode datanode;
   private NameNode namenode;
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+  private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
 
   @Before
   public void setUp() throws Exception {
@@ -77,12 +83,32 @@ public class TestDFSAdmin {
 admin = new DFSAdmin();
   }
 
+  private void redirectStream() {
+System.setOut(new PrintStream(out));
+System.setErr(new PrintStream(err));
+  }
+
+  private void resetStream() {
+out.reset();
+err.reset();
+  }
+
   @After
   public void tearDown() throws Exception {
+try {
+  System.out.flush();
+  System.err.flush();
+} finally {
+  System.setOut(OLD_OUT);
+  System.setErr(OLD_ERR);
+}
+
 if (cluster != null) {
   cluster.shutdown();
   cluster = null;
 }
+
+resetStream();
   }
 
   private void restartCluster() throws IOException {
@@ -111,28 +137,104 @@ public class TestDFSAdmin {
   String nodeType, String address, final List outs,
   final List errs) throws IOException {
 ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
-PrintStream out = new PrintStream(bufOut);
+PrintStream outStream = new PrintStream(bufOut);
 ByteArrayOutputStream bufErr = new ByteArrayOutputStream();
-PrintStream err = new PrintStream(bufErr);
+PrintStream errStream = new PrintStream(bufErr);
 
 if (methodName.equals("getReconfigurableProperties")) {
-  admin.getReconfigurableProperties(nodeType, address, out, err);
+  admin.getReconfigurableProperties(
+  nodeType,
+  address,
+  outStream,
+  errStream);
 } else if (methodName.equals("getReconfigurationStatus")) {
-  admin.getReconfigurationStatus(nodeType, address, out, err);
+  admin.getReconfigurationStatus(nodeType, address, outStream, errStream);
 } else if (methodName.equals("startReconfiguration")) {
-  admin.startReconfiguration(nodeType, address, out, err);
+  admin.startReconfiguration(nodeType, address, outStream, errStream);
 }
 
-Scanner scanner = new Scanner(bufOut.toString());
+scanIntoList(bufOut, outs);
+scanIntoList(bufErr, errs);
+  }
+
+  private static void scanIntoList(
+  final ByteArrayOutputStream baos,
+  final List list) {
+final Scanner scanner = new Scanner(baos.toString());
 while (scanner.hasNextLine()) {
-  outs.add(scanner.nextLine());
+  list.add(scanner.nextLine());
 }
 scanner.close();
-
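
The test captures command output by swapping System.out and System.err for
in-memory streams, then scanning the buffer line by line. A minimal,
self-contained sketch of that capture pattern outside JUnit:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Scanner;

    public class CaptureStdoutExample {
      public static void main(String[] args) {
        PrintStream oldOut = System.out;
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        System.setOut(new PrintStream(buf));
        try {
          System.out.println("line one"); // stand-in for the command under test
          System.out.println("line two");
        } finally {
          System.out.flush();
          System.setOut(oldOut); // always restore the original stream
        }

        List<String> lines = new ArrayList<>();
        try (Scanner scanner = new Scanner(buf.toString())) {
          while (scanner.hasNextLine()) {
            lines.add(scanner.nextLine());
          }
        }
        System.out.println("captured " + lines.size() + " lines");
      }
    }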

[15/51] [abbrv] hadoop git commit: HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. Contributed by Sammi Chen

2016-10-13 Thread aengineer
HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. 
Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af50da32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af50da32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af50da32

Branch: refs/heads/HDFS-7240
Commit: af50da3298f92a52cc20d5f6aab6f6ad8134efbd
Parents: 3d59b18
Author: Kai Zheng 
Authored: Mon Oct 10 11:55:49 2016 +0600
Committer: Kai Zheng 
Committed: Mon Oct 10 11:55:49 2016 +0600

--
 .../src/site/markdown/HDFSErasureCoding.md   | 15 ++-
 1 file changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af50da32/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 18b3a25..627260f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -22,6 +22,7 @@ HDFS Erasure Coding
 * [Deployment](#Deployment)
 * [Cluster and hardware 
configuration](#Cluster_and_hardware_configuration)
 * [Configuration keys](#Configuration_keys)
+* [Enable Intel ISA-L](#Enable_Intel_ISA-L)
 * [Administrative commands](#Administrative_commands)
 
 Purpose
@@ -73,6 +74,9 @@ Architecture
 
 There are three policies currently being supported: RS-DEFAULT-3-2-64k, 
RS-DEFAULT-6-3-64k and RS-LEGACY-6-3-64k. All with default cell size of 64KB. 
The system default policy is RS-DEFAULT-6-3-64k which use the default schema 
RS_6_3_SCHEMA with a cell size of 64KB.
 
+ *  **Intel ISA-L**
+Intel ISA-L stands for Intel Intelligent Storage Acceleration Library. 
ISA-L is a collection of optimized low-level functions used primarily in 
storage applications. It includes a fast block Reed-Solomon type erasure codes 
optimized for Intel AVX and AVX2 instruction sets.
+HDFS EC can leverage this open-source library to accelerate encoding and 
decoding calculation. ISA-L supports most of major operating systems, including 
Linux and Windows. By default, ISA-L is not enabled in HDFS.
 
 Deployment
 --
@@ -98,7 +102,7 @@ Deployment
   `io.erasurecode.codec.rs-default.rawcoder` for the default RS codec,
   `io.erasurecode.codec.rs-legacy.rawcoder` for the legacy RS codec,
   `io.erasurecode.codec.xor.rawcoder` for the XOR codec.
-  The default implementations for all of these codecs are pure Java.
+  The default implementations for all of these codecs are pure Java. For 
default RS codec, there is also a native implementation which leverages Intel 
ISA-L library to improve the encoding and decoding calculation. Please refer to 
section "Enable Intel ISA-L" for more detail information.
 
   Erasure coding background recovery work on the DataNodes can also be tuned 
via the following configuration parameters:
 
@@ -106,6 +110,15 @@ Deployment
   1. `dfs.datanode.stripedread.threads` - Number of concurrent reader threads. 
Default value is 20 threads.
   1. `dfs.datanode.stripedread.buffer.size` - Buffer size for reader service. 
Default value is 256KB.
 
+### Enable Intel ISA-L
+
+  HDFS native implementation of default RS codec leverages Intel ISA-L library 
to improve the encoding and decoding calculation. To enable and use Intel 
ISA-L, there are three steps.
+  1. Build ISA-L library. Please refer to the official site 
"https://github.com/01org/isa-l/" for detail information.
+  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop"(BUILDING.txt) document. Use 
-Dbundle.isal to copy the contents of the isal.lib directory into the final tar 
file. Deploy hadoop with the tar file. Make sure ISA-L library is available on 
both HDFS client and DataNodes.
+  3. Configure the `io.erasurecode.codec.rs-default.rawcoder` key with value 
`org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on 
HDFS client and DataNodes.
+
+  To check ISA-L library enable state, try "Hadoop checknative" command. It 
will tell you if ISA-L library is enabled or not.
+
 ### Administrative commands
 
   HDFS provides an `erasurecode` subcommand to perform administrative commands 
related to erasure coding.
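
Step 3 of "Enable Intel ISA-L" is a single configuration key. A minimal sketch
of setting it programmatically, using the factory class name quoted in the doc;
the same key/value pair can be placed in the configuration files on HDFS clients
and DataNodes:

    import org.apache.hadoop.conf.Configuration;

    public class EnableIsalCoder {
      public static Configuration withNativeRsCoder() {
        Configuration conf = new Configuration();
        // Point the default RS codec at the ISA-L backed implementation.
        conf.set("io.erasurecode.codec.rs-default.rawcoder",
            "org.apache.hadoop.io.erasurecode.rawcoder."
                + "NativeRSRawErasureCoderFactory");
        return conf;
      }
    }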





[49/51] [abbrv] hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

2016-10-13 Thread aengineer
HADOOP-13024. Distcp with -delete feature on raw data not implemented. 
Contributed by Mavin Martin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a85d079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a85d079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a85d079

Branch: refs/heads/HDFS-7240
Commit: 0a85d079838f532a13ca237300386d1b3bc1b178
Parents: 8c721aa
Author: Jing Zhao 
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao 
Committed: Thu Oct 13 13:24:54 2016 -0700

--
 .../apache/hadoop/tools/DistCpConstants.java| 12 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +---
 .../hadoop/tools/util/DistCpTestUtils.java  | 32 --
 4 files changed, 56 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 96f364c..6171aa9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -125,9 +127,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+  DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = 
"/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
 List targets = new ArrayList(1);
 Path targetFinalPath = new 
Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
 targets.add(targetFinalPath);
-DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+Path resultNonePath = 
Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+
.toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+DistCpOptions options = new DistCpOptions(targets, resultNonePath);
 //
 // Set up options to be the same from the CopyListing.buildListing's 
perspective,
 // so to collect similar listings as when doing the copy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class TestDistCpWithRawXAttrs {
 final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
 doTestPreserveRawXAttrs(relSrc, relDst, "-px", true, true,
 DistCpConstants.SUCCESS);
-

[40/51] [abbrv] hadoop git commit: HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. Contributed by Mingliang Liu.

2016-10-13 Thread aengineer
HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/901eca00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/901eca00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/901eca00

Branch: refs/heads/HDFS-7240
Commit: 901eca004d0e7e413b109a93128892176c808d61
Parents: 12d739a
Author: Akira Ajisaka 
Authored: Thu Oct 13 14:29:30 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Oct 13 14:29:30 2016 +0900

--
 .../hadoop-hdfs/src/site/markdown/ExtendedAttributes.md  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/901eca00/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
index 5a20986..eb527ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
@@ -30,7 +30,7 @@ Overview
 
 ### HDFS extended attributes
 
-Extended attributes in HDFS are modeled after extended attributes in Linux 
(see the Linux manpage for 
[attr(5)](http://www.bestbits.at/acl/man/man5/attr.txt) and [related 
documentation](http://www.bestbits.at/acl/)). An extended attribute is a 
*name-value pair*, with a string name and binary value. Xattrs names must also 
be prefixed with a *namespace*. For example, an xattr named *myXattr* in the 
*user* namespace would be specified as **user.myXattr**. Multiple xattrs can be 
associated with a single inode.
+Extended attributes in HDFS are modeled after extended attributes in Linux 
(see the Linux manpage for 
[attr(5)](http://man7.org/linux/man-pages/man5/attr.5.html)). An extended 
attribute is a *name-value pair*, with a string name and binary value. Xattrs 
names must also be prefixed with a *namespace*. For example, an xattr named 
*myXattr* in the *user* namespace would be specified as **user.myXattr**. 
Multiple xattrs can be associated with a single inode.
 
 ### Namespaces and Permissions
 
@@ -49,7 +49,7 @@ The `raw` namespace is reserved for internal system 
attributes that sometimes ne
 Interacting with extended attributes
 
 
-The Hadoop shell has support for interacting with extended attributes via 
`hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled 
after the Linux [getfattr(1)](http://www.bestbits.at/acl/man/man1/getfattr.txt) 
and [setfattr(1)](http://www.bestbits.at/acl/man/man1/setfattr.txt) commands.
+The Hadoop shell has support for interacting with extended attributes via 
`hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled 
after the Linux 
[getfattr(1)](http://man7.org/linux/man-pages/man1/getfattr.1.html) and 
[setfattr(1)](http://man7.org/linux/man-pages/man1/setfattr.1.html) commands.
 
 ### getfattr
 





[20/51] [abbrv] hadoop git commit: HDFS-10988. Refactor TestBalancerBandwidth. Contributed by Brahma Reddy Battula

2016-10-13 Thread aengineer
HDFS-10988. Refactor TestBalancerBandwidth. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9638186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9638186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9638186

Branch: refs/heads/HDFS-7240
Commit: b963818621c200160bb37624f177bdcb059de4eb
Parents: 65912e4
Author: Mingliang Liu 
Authored: Mon Oct 10 13:19:17 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Oct 10 13:19:17 2016 -0700

--
 .../hadoop/hdfs/TestBalancerBandwidth.java  | 57 +---
 1 file changed, 25 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9638186/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
index 6e6bbee..6bbe3a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
@@ -24,13 +24,15 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 /**
@@ -54,9 +56,8 @@ public class TestBalancerBandwidth {
 DEFAULT_BANDWIDTH);
 
 /* Create and start cluster */
-MiniDFSCluster cluster = 
-  new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
-try {
+try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(NUM_OF_DATANODES).build()) {
   cluster.waitActive();
 
   DistributedFileSystem fs = cluster.getFileSystem();
@@ -65,12 +66,6 @@ public class TestBalancerBandwidth {
   // Ensure value from the configuration is reflected in the datanodes.
   assertEquals(DEFAULT_BANDWIDTH, (long) 
datanodes.get(0).getBalancerBandwidth());
   assertEquals(DEFAULT_BANDWIDTH, (long) 
datanodes.get(1).getBalancerBandwidth());
-  ClientDatanodeProtocol dn1Proxy = DFSUtilClient
-  .createClientDatanodeProtocolProxy(datanodes.get(0).getDatanodeId(),
-  conf, 6, false);
-  ClientDatanodeProtocol dn2Proxy = DFSUtilClient
-  .createClientDatanodeProtocolProxy(datanodes.get(1).getDatanodeId(),
-  conf, 6, false);
   DFSAdmin admin = new DFSAdmin(conf);
   String dn1Address = datanodes.get(0).ipcServer.getListenerAddress()
   .getHostName() + ":" + datanodes.get(0).getIpcPort();
@@ -79,51 +74,49 @@ public class TestBalancerBandwidth {
 
   // verifies the dfsadmin command execution
   String[] args = new String[] { "-getBalancerBandwidth", dn1Address };
-  runGetBalancerBandwidthCmd(admin, args, dn1Proxy, DEFAULT_BANDWIDTH);
+  runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
   args = new String[] { "-getBalancerBandwidth", dn2Address };
-  runGetBalancerBandwidthCmd(admin, args, dn2Proxy, DEFAULT_BANDWIDTH);
+  runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
 
   // Dynamically change balancer bandwidth and ensure the updated value
   // is reflected on the datanodes.
   long newBandwidth = 12 * DEFAULT_BANDWIDTH; // 12M bps
   fs.setBalancerBandwidth(newBandwidth);
+  verifyBalancerBandwidth(datanodes, newBandwidth);
 
-  // Give it a few seconds to propogate new the value to the datanodes.
-  try {
-Thread.sleep(5000);
-  } catch (Exception e) {}
-
-  assertEquals(newBandwidth, (long) 
datanodes.get(0).getBalancerBandwidth());
-  assertEquals(newBandwidth, (long) 
datanodes.get(1).getBalancerBandwidth());
   // verifies the dfsadmin command execution
   args = new String[] { "-getBalancerBandwidth", dn1Address };
-  runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+  runGetBalancerBandwidthCmd(admin, args, newBandwidth);
   args = new String[] { "-getBalancerBandwidth", dn2Address };
-  runGetBalancerBandwidthCmd(admin, args, dn2Proxy, 
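
Besides removing the proxies, the refactor puts the MiniDFSCluster in a
try-with-resources block so it is always shut down. A minimal sketch of that
part, assuming a MiniDFSCluster that implements AutoCloseable as in the diff
above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterExample {
      static void runWithCluster() throws Exception {
        Configuration conf = new Configuration();
        // The cluster is shut down automatically when the block exits,
        // even if an assertion or exception fires inside it.
        try (MiniDFSCluster cluster =
                 new MiniDFSCluster.Builder(conf).numDataNodes(2).build()) {
          cluster.waitActive();
          // ... exercise the cluster here ...
        }
      }
    }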

[24/51] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

2016-10-13 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 57fab66..76af724 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -23,11 +23,13 @@ import java.io.FileOutputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
 import java.util.Collections;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -56,13 +58,18 @@ import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.CloseableReferenceCount;
@@ -102,8 +109,14 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private final StorageType storageType;
   private final Map bpSlices
   = new ConcurrentHashMap();
+
+  // Refers to the base StorageLocation used to construct this volume
+  // (i.e., does not include STORAGE_DIR_CURRENT in
+  // /STORAGE_DIR_CURRENT/)
+  private final StorageLocation storageLocation;
+
   private final File currentDir;// /current
-  private final DF usage;   
+  private final DF usage;
   private final long reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
@@ -124,19 +137,25 @@ public class FsVolumeImpl implements FsVolumeSpi {
*/
   protected ThreadPoolExecutor cacheExecutor;
   
-  FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
-  Configuration conf, StorageType storageType) throws IOException {
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+  Configuration conf) throws IOException {
+
+if (sd.getStorageLocation() == null) {
+  throw new IOException("StorageLocation specified for storage directory " 
+
+  sd + " is null");
+}
 this.dataset = dataset;
 this.storageID = storageID;
+this.reservedForReplicas = new AtomicLong(0L);
+this.storageLocation = sd.getStorageLocation();
+this.currentDir = sd.getCurrentDir();
+File parent = currentDir.getParentFile();
+this.usage = new DF(parent, conf);
+this.storageType = storageLocation.getStorageType();
 this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
 + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
 DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
 DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
-this.reservedForReplicas = new AtomicLong(0L);
-this.currentDir = currentDir;
-File parent = currentDir.getParentFile();
-this.usage = new DF(parent, conf);
-this.storageType = storageType;
 this.configuredCapacity = -1;
 this.conf = conf;
 cacheExecutor = initializeCacheExecutor(parent);
@@ -285,19 +304,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
 return true;
   }
 
+  @VisibleForTesting
   File getCurrentDir() {
 return currentDir;
   }
   
-  File getRbwDir(String bpid) throws IOException {
+  protected File 
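After this change a volume is constructed from a Storage.StorageDirectory, and the current directory, DF usage tracker and storage type are all derived from the directory's StorageLocation instead of being passed in separately. A rough caller-side sketch of the new construction path (buildVolume and its parameters are illustrative, not the actual FsDatasetImpl code):

    // Illustrative only: constructing a volume from a StorageDirectory
    // rather than a raw java.io.File.
    static FsVolumeImpl buildVolume(FsDatasetImpl dataset, String storageID,
        Storage.StorageDirectory sd, Configuration conf) throws IOException {
      if (sd.getStorageLocation() == null) {
        throw new IOException("No StorageLocation for storage directory " + sd);
      }
      // currentDir, the DF usage object and the StorageType are now derived
      // inside the constructor from sd and sd.getStorageLocation().
      return new FsVolumeImpl(dataset, storageID, sd, conf);
    }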

[41/51] [abbrv] hadoop git commit: HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. Contributed by Kihwal Lee.

2016-10-13 Thread aengineer
HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9454dc5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9454dc5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9454dc5e

Branch: refs/heads/HDFS-7240
Commit: 9454dc5e8091354cd0a4b8c8aa5f4004529db5d5
Parents: 901eca0
Author: Kihwal Lee 
Authored: Thu Oct 13 08:47:15 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 08:47:15 2016 -0500

--
 .../hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9454dc5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 4887e35..4247a67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -332,7 +332,7 @@ public class NamenodeWebHdfsMethods {
 } else {
   //generate a token
   final Token t = generateDelegationToken(
-  namenode, ugi, userPrincipal.getName());
+  namenode, ugi, null);
   delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
 }
 final String query = op.toQueryString() + delegationQuery





[50/51] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-10-13 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d70e57a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d70e57a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d70e57a

Branch: refs/heads/HDFS-7240
Commit: 7d70e57a137622043033d37e16d478e9bc98d60d
Parents: ef84ac4 0a85d07
Author: Anu Engineer 
Authored: Thu Oct 13 15:15:30 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 15:15:30 2016 -0700

--
 .gitignore  |2 +
 .../server/KerberosAuthenticationHandler.java   |7 +-
 .../util/RolloverSignerSecretProvider.java  |2 +-
 .../util/TestZKSignerSecretProvider.java|  221 +-
 .../dev-support/findbugsExcludeFile.xml |5 +
 hadoop-common-project/hadoop-common/pom.xml |1 +
 .../org/apache/hadoop/conf/ConfServlet.java |   19 +-
 .../org/apache/hadoop/conf/Configuration.java   |  307 ++-
 .../apache/hadoop/fs/CachingGetSpaceUsed.java   |3 +-
 .../apache/hadoop/fs/DFCachingGetSpaceUsed.java |   48 +
 .../src/main/java/org/apache/hadoop/fs/DU.java  |8 +-
 .../apache/hadoop/fs/FileEncryptionInfo.java|   21 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   10 +-
 .../java/org/apache/hadoop/fs/TrashPolicy.java  |   36 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|   15 +
 .../apache/hadoop/fs/permission/AclEntry.java   |   24 +-
 .../hadoop/fs/permission/AclEntryScope.java |2 +-
 .../hadoop/fs/permission/AclEntryType.java  |   23 +-
 .../apache/hadoop/fs/permission/AclStatus.java  |2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java |6 +-
 .../apache/hadoop/io/compress/SnappyCodec.java  |   30 +-
 .../org/apache/hadoop/ipc/ExternalCall.java |   88 +
 .../main/java/org/apache/hadoop/ipc/Server.java |   63 +-
 .../apache/hadoop/ipc/WritableRpcEngine.java|5 +-
 .../java/org/apache/hadoop/log/LogLevel.java|9 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |2 +-
 .../org/apache/hadoop/security/Credentials.java |8 +-
 .../hadoop/security/KerberosAuthException.java  |  118 +
 .../hadoop/security/UGIExceptionMessages.java   |   46 +
 .../hadoop/security/UserGroupInformation.java   |  203 +-
 .../org/apache/hadoop/security/token/Token.java |   60 +-
 .../src/main/resources/core-default.xml |6 +-
 .../src/site/markdown/CommandsManual.md |4 +-
 .../src/site/markdown/FileSystemShell.md|3 +-
 .../src/site/markdown/filesystem/filesystem.md  |   77 +-
 .../org/apache/hadoop/conf/TestConfServlet.java |  122 +-
 .../apache/hadoop/conf/TestConfiguration.java   |  164 +-
 .../apache/hadoop/fs/FileContextURIBase.java|4 +-
 .../hadoop/fs/TestDFCachingGetSpaceUsed.java|   75 +
 .../hadoop/fs/TestFileSystemInitialization.java |   12 +-
 .../java/org/apache/hadoop/fs/TestTrash.java|4 +
 .../AbstractContractRootDirectoryTest.java  |   34 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   39 +
 .../hadoop/ha/TestZKFailoverController.java |   34 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |   87 +
 .../security/TestUserGroupInformation.java  |   33 +-
 .../hadoop/crypto/key/kms/server/KMS.java   |  665 +++--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |4 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |   30 +
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|   21 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java|   88 +-
 .../hdfs/web/resources/AclPermissionParam.java  |   23 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |1 -
 .../hadoop/fs/http/server/FSOperations.java |9 +-
 .../service/hadoop/FileSystemAccessService.java |6 +-
 .../src/main/native/libhdfs/include/hdfs/hdfs.h |1 +
 .../src/contrib/bkjournal/README.txt|   66 -
 .../dev-support/findbugsExcludeFile.xml |5 -
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |  175 --
 .../bkjournal/BookKeeperEditLogInputStream.java |  264 --
 .../BookKeeperEditLogOutputStream.java  |  188 --
 .../bkjournal/BookKeeperJournalManager.java |  893 ---
 .../contrib/bkjournal/CurrentInprogress.java|  160 --
 .../bkjournal/EditLogLedgerMetadata.java|  217 --
 .../hadoop/contrib/bkjournal/MaxTxId.java   |  103 -
 .../bkjournal/src/main/proto/bkjournal.proto|   49 -
 .../hadoop/contrib/bkjournal/BKJMUtil.java  |  184 --
 .../bkjournal/TestBookKeeperAsHASharedDir.java  |  414 ---
 .../bkjournal/TestBookKeeperConfiguration.java  |  174 --
 .../bkjournal/TestBookKeeperEditLogStreams.java |   92 -
 .../bkjournal/TestBookKeeperHACheckpoints.java  |  109 -
 .../bkjournal/TestBookKeeperJournalManager.java |  984 ---
 .../TestBookKeeperSpeculativeRead.java  |  167 --
 .../bkjournal/TestBootstrapStandbyWithBKJM.java 

[07/51] [abbrv] hadoop git commit: HDFS-10980. Optimize check for existence of parent directory. Contributed by Daryn Sharp.

2016-10-13 Thread aengineer
HDFS-10980. Optimize check for existence of parent directory. Contributed by 
Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e57fa81d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e57fa81d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e57fa81d

Branch: refs/heads/HDFS-7240
Commit: e57fa81d9559a93d77fd724f7792326c31a490be
Parents: f3f37e6
Author: Kihwal Lee 
Authored: Fri Oct 7 17:20:15 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 7 17:20:15 2016 -0500

--
 .../hdfs/server/namenode/FSDirMkdirOp.java  |  2 +-
 .../hdfs/server/namenode/FSDirSymlinkOp.java|  2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../hdfs/server/namenode/FSDirectory.java   | 11 ++---
 .../hdfs/server/namenode/TestFSDirectory.java   | 48 
 5 files changed, 56 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 2d1914f..4d8d7d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -66,7 +66,7 @@ class FSDirMkdirOp {
 }
 
 if (!createParent) {
-  fsd.verifyParentDir(iip, src);
+  fsd.verifyParentDir(iip);
 }
 
 // validate that we have enough inodes. This is, at best, a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 6938a84..71362f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -58,7 +58,7 @@ class FSDirSymlinkOp {
   iip = fsd.resolvePathForWrite(pc, link, false);
   link = iip.getPath();
   if (!createParent) {
-fsd.verifyParentDir(iip, link);
+fsd.verifyParentDir(iip);
   }
   if (!fsd.isValidToCreate(link, iip)) {
 throw new IOException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 40be83b..aab0f76 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -323,7 +323,7 @@ class FSDirWriteFileOp {
   }
 } else {
   if (!createParent) {
-dir.verifyParentDir(iip, src);
+dir.verifyParentDir(iip);
   }
   if (!flag.contains(CreateFlag.CREATE)) {
 throw new FileNotFoundException("Can't overwrite non-existent " + src);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8456da6..a059ee5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1765,17 +1765,16 @@ public class FSDirectory implements Closeable {
   /**
* Verify that parent directory of src exists.
*/
-  void 
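The hunk is truncated here, but the API change is clear from the callers: verifyParentDir now takes only the already-resolved INodesInPath, so it no longer needs the path string to locate the parent inode. A minimal sketch of such a check driven off INodesInPath, shown as an illustration of the idea rather than the exact committed method body:

    void verifyParentDir(INodesInPath iip)
        throws FileNotFoundException, ParentNotDirectoryException {
      if (iip.length() > 2) {
        // The second-to-last inode in the resolved path is the parent.
        INode parentNode = iip.getINode(-2);
        if (parentNode == null) {
          throw new FileNotFoundException(
              "Parent directory doesn't exist: " + iip.getPath());
        } else if (!parentNode.isDirectory()) {
          throw new ParentNotDirectoryException(
              "Parent path is not a directory: " + iip.getPath());
        }
      }
    }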

[38/51] [abbrv] hadoop git commit: HDFS-10789. Route webhdfs through the RPC call queue. Contributed by Daryn Sharp and Rushabh S Shah.

2016-10-13 Thread aengineer
HDFS-10789. Route webhdfs through the RPC call queue. Contributed by Daryn 
Sharp and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85cd06f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85cd06f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85cd06f6

Branch: refs/heads/HDFS-7240
Commit: 85cd06f6636f295ad1f3bf2a90063f4714c9cca7
Parents: 6476934
Author: Kihwal Lee 
Authored: Wed Oct 12 15:11:42 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 12 15:11:42 2016 -0500

--
 .../org/apache/hadoop/ipc/ExternalCall.java |   9 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |   6 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  15 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  12 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   6 +-
 .../web/resources/NamenodeWebHdfsMethods.java   | 150 +++
 .../src/main/resources/hdfs-default.xml |   7 +
 .../server/namenode/TestNamenodeRetryCache.java |  25 +++-
 .../web/resources/TestWebHdfsDataLocality.java  |  25 +++-
 10 files changed, 160 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
index 9b4cbcf..5566136 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ipc;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.ipc.Server.Call;
@@ -37,14 +38,10 @@ public abstract class ExternalCall extends Call {
 
   public abstract UserGroupInformation getRemoteUser();
 
-  public final T get() throws IOException, InterruptedException {
+  public final T get() throws InterruptedException, ExecutionException {
 waitForCompletion();
 if (error != null) {
-  if (error instanceof IOException) {
-throw (IOException)error;
-  } else {
-throw new IOException(error);
-  }
+  throw new ExecutionException(error);
 }
 return result;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 92d9183..72b603a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -72,6 +72,7 @@ import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -989,8 +990,9 @@ public class TestRPC extends TestRpcBase {
   try {
 exceptionCall.get();
 fail("didn't throw");
-  } catch (IOException ioe) {
-assertEquals(expectedIOE.getMessage(), ioe.getMessage());
+  } catch (ExecutionException ee) {
+assertTrue((ee.getCause()) instanceof IOException);
+assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
   }
 } finally {
   server.stop();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 18209ae..10c0ad6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,6 +70,9 @@ public class DFSConfigKeys extends 
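A practical consequence of the ExternalCall change earlier in this patch is that get() now reports server-side failures as ExecutionException instead of IOException, as the TestRPC hunk shows, so callers that want the old behaviour must unwrap the cause themselves. A hedged caller-side sketch (the surrounding submit/queue plumbing is assumed and is not part of the patch):

    try {
      // 'call' is an ExternalCall<String> that was previously submitted to the
      // RPC server's call queue; get() blocks until it has been executed.
      String result = call.get();
      System.out.println("call returned: " + result);
    } catch (ExecutionException ee) {
      Throwable cause = ee.getCause();   // the original server-side failure
      if (cause instanceof IOException) {
        throw (IOException) cause;       // rethrow as callers saw it before
      }
      throw new IOException(cause);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      throw new InterruptedIOException("interrupted waiting for external call");
    }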

[09/51] [abbrv] hadoop git commit: HDFS-10968. BlockManager#isInNewRack should consider decommissioning nodes. Contributed by Jing Zhao.

2016-10-13 Thread aengineer
HDFS-10968. BlockManager#isInNewRack should consider decommissioning nodes. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d106213
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d106213
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d106213

Branch: refs/heads/HDFS-7240
Commit: 4d106213c0f4835b723c9a50bd8080a9017122d7
Parents: 6a38d11
Author: Jing Zhao 
Authored: Fri Oct 7 22:44:54 2016 -0700
Committer: Jing Zhao 
Committed: Fri Oct 7 22:44:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java|   6 +-
 ...constructStripedBlocksWithRackAwareness.java | 158 +++
 2 files changed, 130 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d106213/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8b74609..7949439 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1781,8 +1781,12 @@ public class BlockManager implements BlockStatsMXBean {
 
   private boolean isInNewRack(DatanodeDescriptor[] srcs,
   DatanodeDescriptor target) {
+LOG.debug("check if target {} increases racks, srcs={}", target,
+Arrays.asList(srcs));
 for (DatanodeDescriptor src : srcs) {
-  if (src.getNetworkLocation().equals(target.getNetworkLocation())) {
+  if (!src.isDecommissionInProgress() &&
+  src.getNetworkLocation().equals(target.getNetworkLocation())) {
+LOG.debug("the target {} is in the same rack with src {}", target, 
src);
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d106213/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 152e153..3bc13a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -35,12 +35,14 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -58,57 +60,44 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
   }
 
-  private static final String[] hosts = getHosts();
-  private static final String[] racks = getRacks();
+  private static final String[] hosts =
+  getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1);
+  private static final String[] racks =
+  getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1, NUM_DATA_BLOCKS);
 
-  private static String[] getHosts() {
-String[] hosts = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
+  private static String[] getHosts(int numHosts) {
+String[] hosts = new String[numHosts];
 for (int i = 0; i < hosts.length; i++) {
   hosts[i] = "host" + (i + 1);
 }
 return hosts;
   }
 
-  private static String[] getRacks() {
-String[] racks = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
-int numHostEachRack = (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 1) /
-(NUM_DATA_BLOCKS - 1) + 1;
+  private static String[] getRacks(int numHosts, int numRacks) {
+String[] racks = new String[numHosts];
+int numHostEachRack = numHosts / numRacks;
+int residue = numHosts % numRacks;
 int j = 0;
-// we have NUM_DATA_BLOCKS racks
-for (int i = 1; i <= 
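The new getRacks helper is cut off above; the visible part computes a per-rack host count plus a residue, which suggests the hosts are spread as evenly as possible over the requested number of racks. One plausible completion, shown purely as an illustration (the committed loop may differ in detail):

    private static String[] getRacks(int numHosts, int numRacks) {
      String[] racks = new String[numHosts];
      int numHostEachRack = numHosts / numRacks;
      int residue = numHosts % numRacks;
      int j = 0;
      for (int i = 1; i <= numRacks; i++) {
        // The first 'residue' racks absorb one extra host each.
        int hostsThisRack = numHostEachRack + (i <= residue ? 1 : 0);
        for (int k = 0; k < hostsThisRack && j < racks.length; k++) {
          racks[j++] = "/rack" + i;
        }
      }
      return racks;
    }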

[35/51] [abbrv] hadoop git commit: HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. Contributed by Xiaobing Zhou

2016-10-13 Thread aengineer
HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. 
Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ba7092b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ba7092b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ba7092b

Branch: refs/heads/HDFS-7240
Commit: 7ba7092bbcbbccfa24b672414d315656e600096c
Parents: b84c489
Author: Mingliang Liu 
Authored: Tue Oct 11 16:47:39 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 17:23:54 2016 -0700

--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 50 
 1 file changed, 50 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba7092b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 94ecb9e..b49f73d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
@@ -364,6 +365,55 @@ public class TestDFSAdmin {
   }
 
   @Test(timeout = 3)
+  public void testPrintTopology() throws Exception {
+redirectStream();
+
+/* init conf */
+final Configuration dfsConf = new HdfsConfiguration();
+final File baseDir = new File(
+PathUtils.getTestDir(getClass()),
+GenericTestUtils.getMethodName());
+dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, 
baseDir.getAbsolutePath());
+
+final int numDn = 4;
+final String[] racks = {
+"/d1/r1", "/d1/r2",
+"/d2/r1", "/d2/r2"};
+
+/* init cluster using topology */
+try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+.numDataNodes(numDn).racks(racks).build()) {
+
+  miniCluster.waitActive();
+  assertEquals(numDn, miniCluster.getDataNodes().size());
+  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+  resetStream();
+  final int ret = ToolRunner.run(dfsAdmin, new String[] 
{"-printTopology"});
+
+  /* collect outputs */
+  final List outs = Lists.newArrayList();
+  scanIntoList(out, outs);
+
+  /* verify results */
+  assertEquals(0, ret);
+  assertEquals(
+  "There should be three lines per Datanode: the 1st line is"
+  + " rack info, 2nd node info, 3rd empty line. The total"
+  + " should be as a result of 3 * numDn.",
+  12, outs.size());
+  assertThat(outs.get(0),
+  is(allOf(containsString("Rack:"), containsString("/d1/r1";
+  assertThat(outs.get(3),
+  is(allOf(containsString("Rack:"), containsString("/d1/r2";
+  assertThat(outs.get(6),
+  is(allOf(containsString("Rack:"), containsString("/d2/r1";
+  assertThat(outs.get(9),
+  is(allOf(containsString("Rack:"), containsString("/d2/r2";
+}
+  }
+
+  @Test(timeout = 3)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
   InterruptedException, TimeoutException {
 ReconfigurationUtil ru = mock(ReconfigurationUtil.class);


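The 12-line assertion in testPrintTopology follows directly from the -printTopology report format: for each rack the tool prints a "Rack:" header line, one line per datanode in that rack, and a blank separator line, so four single-node racks yield 4 * 3 = 12 lines. Roughly (addresses and ports here are placeholders, not values from the test):

    Rack: /d1/r1
        127.0.0.1:<port> (host1)

    Rack: /d1/r2
        127.0.0.1:<port> (host2)

    ... (the two /d2 racks follow the same pattern)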



[31/51] [abbrv] hadoop git commit: HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra Singh Lilhore.

2016-10-13 Thread aengineer
HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra 
Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dacd3ec6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dacd3ec6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dacd3ec6

Branch: refs/heads/HDFS-7240
Commit: dacd3ec66b111be24131957c986f0c748cf9ea26
Parents: 8a09bf7
Author: Andrew Wang 
Authored: Tue Oct 11 15:07:14 2016 -0700
Committer: Andrew Wang 
Committed: Tue Oct 11 15:07:14 2016 -0700

--
 .../src/main/native/libhdfs/include/hdfs/hdfs.h | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dacd3ec6/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index c856928..83c1c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -493,6 +493,7 @@ extern  "C" {
  * complete before proceeding with further file updates.
  * -1 on error.
  */
+LIBHDFS_EXTERNAL
 int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
 
 /**





[22/51] [abbrv] hadoop git commit: Merge branch 'HADOOP-12756' into trunk

2016-10-13 Thread aengineer
Merge branch 'HADOOP-12756' into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/669d6f13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/669d6f13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/669d6f13

Branch: refs/heads/HDFS-7240
Commit: 669d6f13ec48a90d4ba7e4ed1dd0e9687580f8f3
Parents: c874fa9 c31b5e6
Author: Kai Zheng 
Authored: Tue Oct 11 03:22:11 2016 +0600
Committer: Kai Zheng 
Committed: Tue Oct 11 03:22:11 2016 +0600

--
 .gitignore  |   2 +
 hadoop-project/pom.xml  |  22 +
 .../dev-support/findbugs-exclude.xml|  18 +
 hadoop-tools/hadoop-aliyun/pom.xml  | 154 +
 .../aliyun/oss/AliyunCredentialsProvider.java   |  87 +++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  | 580 +++
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 516 +
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 260 +
 .../fs/aliyun/oss/AliyunOSSOutputStream.java| 111 
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 167 ++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  | 113 
 .../hadoop/fs/aliyun/oss/package-info.java  |  22 +
 .../site/markdown/tools/hadoop-aliyun/index.md  | 294 ++
 .../fs/aliyun/oss/AliyunOSSTestUtils.java   |  77 +++
 .../fs/aliyun/oss/TestAliyunCredentials.java|  78 +++
 .../oss/TestAliyunOSSFileSystemContract.java| 239 
 .../oss/TestAliyunOSSFileSystemStore.java   | 125 
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java | 145 +
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 +++
 .../aliyun/oss/contract/AliyunOSSContract.java  |  49 ++
 .../contract/TestAliyunOSSContractCreate.java   |  35 ++
 .../contract/TestAliyunOSSContractDelete.java   |  34 ++
 .../contract/TestAliyunOSSContractDistCp.java   |  44 ++
 .../TestAliyunOSSContractGetFileStatus.java |  35 ++
 .../contract/TestAliyunOSSContractMkdir.java|  34 ++
 .../oss/contract/TestAliyunOSSContractOpen.java |  34 ++
 .../contract/TestAliyunOSSContractRename.java   |  35 ++
 .../contract/TestAliyunOSSContractRootDir.java  |  69 +++
 .../oss/contract/TestAliyunOSSContractSeek.java |  34 ++
 .../src/test/resources/contract/aliyun-oss.xml  | 115 
 .../src/test/resources/core-site.xml|  46 ++
 .../src/test/resources/log4j.properties |  23 +
 hadoop-tools/hadoop-tools-dist/pom.xml  |   6 +
 hadoop-tools/pom.xml|   1 +
 34 files changed, 3695 insertions(+)
--






[12/51] [abbrv] hadoop git commit: HADOOP-12579. Deprecate WriteableRPCEngine. Contributed by Wei Zhou

2016-10-13 Thread aengineer
HADOOP-12579. Deprecate WriteableRPCEngine. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec0b7071
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec0b7071
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec0b7071

Branch: refs/heads/HDFS-7240
Commit: ec0b70716c8e6509654a3975d3ca139a0144cc8e
Parents: 4d10621
Author: Kai Zheng 
Authored: Sun Oct 9 15:07:03 2016 +0600
Committer: Kai Zheng 
Committed: Sun Oct 9 15:07:03 2016 +0600

--
 .../src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec0b7071/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index a9dbb41..3d6d461 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -46,6 +46,7 @@ import org.apache.htrace.core.Tracer;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
+@Deprecated
 public class WritableRpcEngine implements RpcEngine {
   private static final Log LOG = LogFactory.getLog(RPC.class);
   
@@ -331,6 +332,7 @@ public class WritableRpcEngine implements RpcEngine {
 
 
   /** An RPC Server. */
+  @Deprecated
   public static class Server extends RPC.Server {
 /** 
  * Construct an RPC server.
@@ -443,7 +445,8 @@ public class WritableRpcEngine implements RpcEngine {
 value = value.substring(0, 55)+"...";
   LOG.info(value);
 }
-
+
+@Deprecated
 static class WritableRpcInvoker implements RpcInvoker {
 
  @Override


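With WritableRpcEngine deprecated, new protocols are expected to run over the protobuf-based engine; the engine used for a given protocol is selected through RPC.setProtocolEngine. A minimal illustration (MyProtocolPB stands in for a real protobuf protocol interface and is not part of this patch):

    Configuration conf = new Configuration();
    // Prefer ProtobufRpcEngine over the deprecated WritableRpcEngine
    // when registering a protocol with the RPC layer.
    RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine.class);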



[37/51] [abbrv] hadoop git commit: YARN-5677. RM should transition to standby when connection is lost for an extended period. (Daniel Templeton via kasha)

2016-10-13 Thread aengineer
YARN-5677. RM should transition to standby when connection is lost for an 
extended period. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6476934a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6476934a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6476934a

Branch: refs/heads/HDFS-7240
Commit: 6476934ae5de1be7988ab198b673d82fe0f006e3
Parents: 6378845
Author: Karthik Kambatla 
Authored: Tue Oct 11 22:07:10 2016 -0700
Committer: Karthik Kambatla 
Committed: Tue Oct 11 22:07:10 2016 -0700

--
 .../resourcemanager/EmbeddedElectorService.java |  59 +-
 .../resourcemanager/TestRMEmbeddedElector.java  | 191 +++
 2 files changed, 244 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6476934a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
index 72327e8..88d2e10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,8 @@ import org.apache.zookeeper.data.ACL;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -54,6 +57,10 @@ public class EmbeddedElectorService extends AbstractService
 
   private byte[] localActiveNodeInfo;
   private ActiveStandbyElector elector;
+  private long zkSessionTimeout;
+  private Timer zkDisconnectTimer;
+  @VisibleForTesting
+  final Object zkDisconnectLock = new Object();
 
   EmbeddedElectorService(RMContext rmContext) {
 super(EmbeddedElectorService.class.getName());
@@ -80,7 +87,7 @@ public class EmbeddedElectorService extends AbstractService
 YarnConfiguration.DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH);
 String electionZNode = zkBasePath + "/" + clusterId;
 
-long zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
+zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
 YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
 
 List zkAcls = RMZKUtils.getZKAcls(conf);
@@ -123,6 +130,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeActive() throws ServiceFailedException {
+cancelDisconnectTimer();
+
 try {
   rmContext.getRMAdminService().transitionToActive(req);
 } catch (Exception e) {
@@ -132,6 +141,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeStandby() {
+cancelDisconnectTimer();
+
 try {
   rmContext.getRMAdminService().transitionToStandby(req);
 } catch (Exception e) {
@@ -139,13 +150,49 @@ public class EmbeddedElectorService extends 
AbstractService
 }
   }
 
+  /**
+   * Stop the disconnect timer.  Any running tasks will be allowed to complete.
+   */
+  private void cancelDisconnectTimer() {
+synchronized (zkDisconnectLock) {
+  if (zkDisconnectTimer != null) {
+zkDisconnectTimer.cancel();
+zkDisconnectTimer = null;
+  }
+}
+  }
+
+  /**
+   * When the ZK client loses contact with ZK, this method will be called to
+   * allow the RM to react. Because the loss of connection can be noticed
+   * before the session timeout happens, it is undesirable to transition
+   * immediately. Instead the method starts a timer that will wait
+   * {@link YarnConfiguration#RM_ZK_TIMEOUT_MS} milliseconds before
+   * initiating the transition into standby state.
+   */
   @Override
   public void enterNeutralMode() {
-/**
- * Possibly due to transient connection issues. Do nothing.
- * TODO: Might want to keep track of how long in 
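The body of enterNeutralMode is cut off above, but the new javadoc describes the mechanism: instead of reacting to every transient ZooKeeper disconnect, schedule a task that only transitions the RM if the disconnect outlives the session timeout, and cancel that timer again from becomeActive()/becomeStandby(). A rough sketch of that pattern using the fields introduced by this patch (illustrative, not the literal committed body):

    synchronized (zkDisconnectLock) {
      if (zkDisconnectTimer == null) {
        zkDisconnectTimer = new Timer("zk-disconnect-timer", true);
        zkDisconnectTimer.schedule(new TimerTask() {
          @Override
          public void run() {
            synchronized (zkDisconnectLock) {
              // Still disconnected after zkSessionTimeout ms: step down.
              becomeStandby();
            }
          }
        }, zkSessionTimeout);
      }
    }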

[16/51] [abbrv] hadoop git commit: HADOOP-13696. change hadoop-common dependency scope of jsch to provided. Contributed by Yuanbo Liu.

2016-10-13 Thread aengineer
HADOOP-13696. change hadoop-common dependency scope of jsch to provided. 
Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef61d50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef61d50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef61d50

Branch: refs/heads/HDFS-7240
Commit: cef61d505e289f074130cc3981c20f7692437cee
Parents: af50da3
Author: Steve Loughran 
Authored: Mon Oct 10 12:32:39 2016 +0100
Committer: Steve Loughran 
Committed: Mon Oct 10 12:32:39 2016 +0100

--
 hadoop-common-project/hadoop-common/pom.xml | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef61d50/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 54d1cdd..92582ae 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -235,6 +235,7 @@
     <dependency>
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>





[30/51] [abbrv] hadoop git commit: HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and initialize code.

2016-10-13 Thread aengineer
HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and 
initialize code.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a09bf7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a09bf7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a09bf7c

Branch: refs/heads/HDFS-7240
Commit: 8a09bf7c19d9d2f6d6853d45e11b0d38c7c67f2a
Parents: 4b32b14
Author: Andrew Wang 
Authored: Tue Oct 11 13:46:07 2016 -0700
Committer: Andrew Wang 
Committed: Tue Oct 11 13:46:07 2016 -0700

--
 .../java/org/apache/hadoop/fs/TrashPolicy.java  | 30 
 .../apache/hadoop/fs/TrashPolicyDefault.java| 15 ++
 .../java/org/apache/hadoop/fs/TestTrash.java|  4 +++
 3 files changed, 49 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index bd99db4..157b9ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -38,6 +38,17 @@ public abstract class TrashPolicy extends Configured {
 
   /**
* Used to setup the trash policy. Must be implemented by all TrashPolicy
+   * implementations.
+   * @param conf the configuration to be used
+   * @param fs the filesystem to be used
+   * @param home the home directory
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public abstract void initialize(Configuration conf, FileSystem fs, Path 
home);
+
+  /**
+   * Used to setup the trash policy. Must be implemented by all TrashPolicy
* implementations. Different from initialize(conf, fs, home), this one does
* not assume trash always under /user/$USER due to HDFS encryption zone.
* @param conf the configuration to be used
@@ -105,6 +116,25 @@ public abstract class TrashPolicy extends Configured {
*
* @param conf the configuration to be used
* @param fs the file system to be used
+   * @param home the home directory
+   * @return an instance of TrashPolicy
+   * @deprecated Use {@link #getInstance(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, 
Path home) {
+Class trashClass = conf.getClass(
+"fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
+trash.initialize(conf, fs, home); // initialize TrashPolicy
+return trash;
+  }
+
+  /**
+   * Get an instance of the configured TrashPolicy based on the value
+   * of the configuration parameter fs.trash.classname.
+   *
+   * @param conf the configuration to be used
+   * @param fs the file system to be used
* @return an instance of TrashPolicy
*/
   public static TrashPolicy getInstance(Configuration conf, FileSystem fs)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index f4a825c..7be 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -75,6 +75,21 @@ public class TrashPolicyDefault extends TrashPolicy {
 initialize(conf, fs);
   }
 
+  /**
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Override
+  @Deprecated
+  public void initialize(Configuration conf, FileSystem fs, Path home) {
+this.fs = fs;
+this.deletionInterval = (long)(conf.getFloat(
+FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
+* MSECS_PER_MINUTE);
+this.emptierInterval = (long)(conf.getFloat(
+FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
+* MSECS_PER_MINUTE);
+   }
+
   @Override
   public void initialize(Configuration conf, FileSystem fs) {
 this.fs = fs;
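The point of the revert is source compatibility: code written against the old three-argument entry points keeps compiling, while new code moves to the two-argument forms that no longer assume the trash lives under /user/$USER. A small before/after usage sketch (the FileSystem and Path values are placeholders):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path home = fs.getHomeDirectory();

    // Old, now re-instated but deprecated: trash rooted at the user's home.
    TrashPolicy legacy = TrashPolicy.getInstance(conf, fs, home);

    // Preferred: the policy resolves the trash root itself, which also
    // works for paths inside HDFS encryption zones.
    TrashPolicy current = TrashPolicy.getInstance(conf, fs);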


[33/51] [abbrv] hadoop git commit: HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang

2016-10-13 Thread aengineer
HDFS-10903. Replace config key literal strings with config key names II: hadoop 
hdfs. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c9a0106
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c9a0106
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c9a0106

Branch: refs/heads/HDFS-7240
Commit: 3c9a01062e9097c2ed1db75318482543db2e382f
Parents: 61f0490
Author: Mingliang Liu 
Authored: Tue Oct 11 16:29:30 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 16:29:30 2016 -0700

--
 .../java/org/apache/hadoop/fs/http/server/FSOperations.java | 9 +++--
 .../hadoop/lib/service/hadoop/FileSystemAccessService.java  | 6 --
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java   | 3 ++-
 .../hdfs/server/blockmanagement/TestBlockTokenWithDFS.java  | 3 ++-
 6 files changed, 26 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 46948f9..001bc92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -48,6 +48,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
+
 /**
  * FileSystem operation executors used by {@link HttpFSServer}.
  */
@@ -462,7 +465,8 @@ public class FSOperations {
 blockSize = fs.getDefaultBlockSize(path);
   }
   FsPermission fsPermission = new FsPermission(permission);
-  int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+  int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY,
+  HTTP_BUFFER_SIZE_DEFAULT);
   OutputStream os = fs.create(path, fsPermission, override, bufferSize, 
replication, blockSize, null);
   IOUtils.copyBytes(is, os, bufferSize, true);
   os.close();
@@ -752,7 +756,8 @@ public class FSOperations {
  */
 @Override
 public InputStream execute(FileSystem fs) throws IOException {
-  int bufferSize = 
HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+  int bufferSize = HttpFSServerWebApp.get().getConfig().getInt(
+  HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
   return fs.open(path, bufferSize);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index 0b767be..61d3b45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -50,6 +50,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
 @InterfaceAudience.Private
 public class FileSystemAccessService extends BaseService implements 
FileSystemAccess {
   private static final Logger LOG = 
LoggerFactory.getLogger(FileSystemAccessService.class);
@@ -159,7 +161,7 @@ public class FileSystemAccessService extends BaseService 
implements FileSystemAc
 throw new ServiceException(FileSystemAccessException.ERROR.H01, 
KERBEROS_PRINCIPAL);
   }
   Configuration conf = new Configuration();
-  conf.set("hadoop.security.authentication", "kerberos");
+  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
   UserGroupInformation.setConfiguration(conf);
   try {
 

[06/51] [abbrv] hadoop git commit: MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more useful default (miklos.szeg...@cloudera.com via rkanter)

2016-10-13 Thread aengineer
MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more 
useful default (miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3f37e6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3f37e6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3f37e6f

Branch: refs/heads/HDFS-7240
Commit: f3f37e6fb8172f6434e06eb9a137c0c155b3952e
Parents: 2e853be
Author: Robert Kanter 
Authored: Fri Oct 7 14:47:06 2016 -0700
Committer: Robert Kanter 
Committed: Fri Oct 7 14:47:06 2016 -0700

--
 .../apache/hadoop/mapreduce/MRJobConfig.java|  2 +-
 .../src/main/resources/mapred-default.xml   | 10 +++---
 .../apache/hadoop/mapred/JobClientUnitTest.java | 34 
 3 files changed, 34 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 5716404..1325b74 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -505,7 +505,7 @@ public interface MRJobConfig {
*/
   public static final String MR_CLIENT_JOB_MAX_RETRIES =
   MR_PREFIX + "client.job.max-retries";
-  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 0;
+  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 3;
 
   /**
* How long to wait between jobclient retries on failure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 73aaa7a..fe29212 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1505,12 +1505,12 @@
 
 
   yarn.app.mapreduce.client.job.max-retries
-  0
+  3
   The number of retries the client will make for getJob and
-  dependent calls.  The default is 0 as this is generally only needed for
-  non-HDFS DFS where additional, high level retries are required to avoid
-  spurious failures during the getJob call.  30 is a good value for
-  WASB
+dependent calls.
+This is needed for non-HDFS DFS where additional, high level
+retries are required to avoid spurious failures during the getJob call.
+30 is a good value for WASB
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
index 4895a5b..e02232d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
@@ -225,10 +225,10 @@ public class JobClientUnitTest {
 
 //To prevent the test from running for a very long time, lower the retry
 JobConf conf = new JobConf();
-conf.set(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, "3");
+conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 2);
 
 TestJobClientGetJob client = new TestJobClientGetJob(conf);
-JobID id = new JobID("ajob",1);
+JobID id = new JobID("ajob", 1);
 

hadoop git commit: HDFS-11004. Ozone : move Chunk IO and container protocol calls to hdfs-client. Contributed by Chen Liang.

2016-10-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 1fc744c6c -> c70775aff


HDFS-11004. Ozone : move Chunk IO and container protocol calls to hdfs-client. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c70775af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c70775af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c70775af

Branch: refs/heads/HDFS-7240
Commit: c70775aff6113a3bbaa237923fad3c21a73a7793
Parents: 1fc744c
Author: Anu Engineer 
Authored: Thu Oct 13 16:34:29 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 16:34:29 2016 -0700

--
 .../org/apache/hadoop/scm/ScmConfigKeys.java|   3 +
 .../hadoop/scm/storage/ChunkInputStream.java| 191 
 .../hadoop/scm/storage/ChunkOutputStream.java   | 222 +++
 .../scm/storage/ContainerProtocolCalls.java | 190 
 .../apache/hadoop/scm/storage/package-info.java |  23 ++
 .../ozone/web/storage/ChunkInputStream.java | 193 
 .../ozone/web/storage/ChunkOutputStream.java| 219 --
 .../web/storage/ContainerProtocolCalls.java | 198 -
 .../web/storage/DistributedStorageHandler.java  |  22 +-
 9 files changed, 641 insertions(+), 620 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c70775af/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
index a1b2393..44414ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
@@ -29,4 +29,7 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_IPC_PORT =
   "dfs.container.ipc";
   public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 50011;
+
+  // TODO : this is copied from OzoneConsts, may need to move to a better place
+  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c70775af/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java
new file mode 100644
index 000..1206ecd
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.scm.storage;
+
+import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.*;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import com.google.protobuf.ByteString;
+
+import 
org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadChunkResponseProto;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.scm.XceiverClient;
+import org.apache.hadoop.scm.XceiverClientManager;
+
+/**
+ * An {@link InputStream} used by the REST service in combination with the
+ * SCMClient to read the value of a key from a sequence
+ * of container chunks.  All bytes of the key value are stored in container
+ * chunks.  Each chunk may contain multiple underlying {@link ByteBuffer}
+ * instances.  This class encapsulates all state management for iterating
+ * through the sequence of chunks and the sequence of 

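The javadoc above is truncated in this digest, but the idea it describes (an InputStream that walks a sequence of chunks, each backed by one or more ByteBuffers) can be sketched in a few lines. This is an illustration of that buffer-iteration state management only, not the real ChunkInputStream, which additionally fetches chunk data on demand through ContainerProtocolCalls over an XceiverClient:

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.List;

// Minimal sketch: read() drains the current buffer, then advances to the next.
class BufferSequenceInputStream extends InputStream {
  private final List<ByteBuffer> buffers;
  private int current = 0;

  BufferSequenceInputStream(List<ByteBuffer> buffers) {
    this.buffers = buffers;
  }

  @Override
  public int read() throws IOException {
    while (current < buffers.size() && !buffers.get(current).hasRemaining()) {
      current++;                       // skip drained buffers
    }
    if (current == buffers.size()) {
      return -1;                       // end of the key value
    }
    return buffers.get(current).get() & 0xFF;
  }
}
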
hadoop git commit: HDFS-10912. Ozone:SCM: Add chill mode support to NodeManager. Contributed by Anu Engineer.

2016-10-13 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 841742cdd -> 1fc744c6c


HDFS-10912. Ozone:SCM: Add chill mode support to NodeManager. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fc744c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fc744c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fc744c6

Branch: refs/heads/HDFS-7240
Commit: 1fc744c6c5bfffa9aedde5aa66c38ac47847e9a0
Parents: 841742c
Author: Anu Engineer 
Authored: Thu Oct 13 16:00:29 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 16:00:29 2016 -0700

--
 .../hadoop/ozone/scm/node/NodeManager.java  |  43 +++-
 .../hadoop/ozone/scm/node/SCMNodeManager.java   | 107 ---
 .../hadoop/ozone/scm/node/TestNodeManager.java  |  92 
 3 files changed, 204 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fc744c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java
index 699c789..9de6c81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java
@@ -94,18 +94,51 @@ public interface NodeManager extends Closeable, Runnable {
   List getAllNodes();
 
   /**
-   * Get the minimum number of nodes to get out of safe mode.
+   * Get the minimum number of nodes to get out of chill mode.
*
* @return int
*/
-  int getMinimumSafeModeNodes();
+  int getMinimumChillModeNodes();
 
   /**
-   * Reports if we have exited out of safe mode by discovering enough nodes.
+   * Reports if we have exited out of chill mode by discovering enough nodes.
*
-   * @return True if we are out of Node layer safe mode, false otherwise.
+   * @return True if we are out of Node layer chill mode, false otherwise.
*/
-  boolean isOutOfNodeSafeMode();
+  boolean isOutOfNodeChillMode();
+
+  /**
+   * Chill mode is the period when node manager waits for a minimum
+   * configured number of datanodes to report in. This is called chill mode
+   * to indicate the period before node manager gets into action.
+   *
+   * Forcefully exits the chill mode, even if we have not met the minimum
+   * criteria of the nodes reporting in.
+   */
+  void forceExitChillMode();
+
+  /**
+   * Forcefully enters chill mode, even if all minimum node conditions are met.
+   */
+  void forceEnterChillMode();
+
+  /**
+   * Clears the manual chill mode flag.
+   */
+  void clearChillModeFlag();
+
+  /**
+   * Returns a chill mode status string.
+   * @return String
+   */
+  String getChillModeStatus();
+
+  /**
+   * Returns the status of manual chill mode flag.
+   * @return true if forceEnterChillMode has been called,
+   * false if forceExitChillMode or status is not set. eg. clearChillModeFlag.
+   */
+  boolean isInManualChillMode();
 
   /**
* Enum that represents the Node State. This is used in calls to getNodeList

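Taken together, the new methods give the SCM an explicit manual override on top of the automatic node-count check. A short illustration of how a caller might drive them (usage sketch only, not code from the patch):

import org.apache.hadoop.ozone.scm.node.NodeManager;

// Illustrative use of the chill-mode controls declared above.
public final class ChillModeAdminSketch {
  public static void maintenanceWindow(NodeManager nodeManager) {
    nodeManager.forceEnterChillMode();        // stop acting on node reports
    assert nodeManager.isInManualChillMode();
    // ... perform maintenance ...
    nodeManager.clearChillModeFlag();         // back to automatic behaviour
    System.out.println(nodeManager.getChillModeStatus());
    if (nodeManager.isOutOfNodeChillMode()) {
      // enough datanodes have reported in; normal operation can resume
    }
  }
}
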
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fc744c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
index e866dbc..da3710f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.scm.node;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
@@ -35,7 +36,6 @@ import java.util.Map;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -101,8 +101,9 @@ public class SCMNodeManager implements NodeManager {
  

hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

2016-10-13 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d3c446514 -> 0884c31ae


HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0884c31a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0884c31a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0884c31a

Branch: refs/heads/branch-2.8
Commit: 0884c31aec5313a9b251c63523436356e13a839d
Parents: d3c4465
Author: Brahma Reddy Battula 
Authored: Fri Oct 14 10:28:13 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Fri Oct 14 10:28:13 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 102 +--
 2 files changed, 51 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0884c31a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index af28ec2..cc40359 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -908,8 +908,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2075,7 +2074,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2086,8 +2085,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
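
All three hunks apply the same fix: instead of printing a fixed "Datanode unreachable." line and returning -1, the command rethrows with the original exception attached, so the caller can log the detailed cause (for example, the connect-retry failure). The shape of the idiom, as a generic sketch rather than code from the patch:

import java.io.IOException;

// Wrap-and-rethrow sketch: the short summary goes in the message, the root
// cause rides along instead of being swallowed with "return -1".
public final class WrapAndRethrowSketch {
  interface DatanodeCall { void run() throws IOException; }

  static void invoke(DatanodeCall call) throws IOException {
    try {
      call.run();
    } catch (IOException ioe) {
      throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
  }
}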

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0884c31a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index dee1f88..4bcca85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 
 import com.google.common.collect.Lists;
@@ -68,6 +69,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -105,7 +107,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
   }
@@ -135,66 +137,58 @@ public class TestDFSAdmin {
   @Test(timeout = 3)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+final DFSAdmin dfsAdmin = new DFSAdmin(conf);
 
-  /* init reused vars */
-  List outs = null;
-  int ret;
-
-  /**
-   * test erroneous run
-   */
+for (int i = 

hadoop git commit: HADOOP-13669. Addendum patch for KMS Server should log exceptions before throwing.

2016-10-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk cf3f43e95 -> ae51b11f7


HADOOP-13669. Addendum patch for KMS Server should log exceptions before 
throwing.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae51b11f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae51b11f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae51b11f

Branch: refs/heads/trunk
Commit: ae51b11f7872eaac558acf00fd23f6d7b1841cfe
Parents: cf3f43e
Author: Xiao Chen 
Authored: Thu Oct 13 22:32:08 2016 -0700
Committer: Xiao Chen 
Committed: Thu Oct 13 22:32:08 2016 -0700

--
 .../hadoop-kms/dev-support/findbugsExcludeFile.xml| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae51b11f/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
index bc92ed7..78c4ca6 100644
--- a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
@@ -38,4 +38,11 @@
 
 
   
+  
+  
+
+
+  
 





hadoop git commit: HADOOP-13669. Addendum patch for KMS Server should log exceptions before throwing.

2016-10-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f0f2681b4 -> 562c358a8


HADOOP-13669. Addendum patch for KMS Server should log exceptions before 
throwing.

(cherry picked from commit ae51b11f7872eaac558acf00fd23f6d7b1841cfe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/562c358a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/562c358a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/562c358a

Branch: refs/heads/branch-2
Commit: 562c358a8327dbe8fa40dc59d2568ba7e7554e93
Parents: f0f2681
Author: Xiao Chen 
Authored: Thu Oct 13 22:32:08 2016 -0700
Committer: Xiao Chen 
Committed: Thu Oct 13 22:32:25 2016 -0700

--
 .../hadoop-kms/dev-support/findbugsExcludeFile.xml| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/562c358a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
index bc92ed7..78c4ca6 100644
--- a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
@@ -38,4 +38,11 @@
 
 
   
+  
+  
+
+
+  
 





hadoop git commit: HADOOP-13669. Addendum patch for KMS Server should log exceptions before throwing.

2016-10-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 0884c31ae -> 1ea707004


HADOOP-13669. Addendum patch for KMS Server should log exceptions before 
throwing.

(cherry picked from commit ae51b11f7872eaac558acf00fd23f6d7b1841cfe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea70700
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea70700
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea70700

Branch: refs/heads/branch-2.8
Commit: 1ea7070042d6ed335c6c14ec3acf80e36d43b1f6
Parents: 0884c31
Author: Xiao Chen 
Authored: Thu Oct 13 22:32:08 2016 -0700
Committer: Xiao Chen 
Committed: Thu Oct 13 22:32:30 2016 -0700

--
 .../hadoop-kms/dev-support/findbugsExcludeFile.xml| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea70700/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
index bc92ed7..78c4ca6 100644
--- a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
@@ -38,4 +38,11 @@
 
 
   
+  
+  
+
+
+  
 





hadoop git commit: HADOOP-13723. AliyunOSSInputStream#read() should update read bytes stat correctly. Contributed by Mingliang Liu

2016-10-13 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk ae51b11f7 -> d9f73f1b7


HADOOP-13723. AliyunOSSInputStream#read() should update read bytes stat 
correctly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9f73f1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9f73f1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9f73f1b

Branch: refs/heads/trunk
Commit: d9f73f1b7cd893a7d88baa9bfd1b809a5dec9e59
Parents: ae51b11
Author: Mingliang Liu 
Authored: Thu Oct 13 17:05:28 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Oct 13 22:33:55 2016 -0700

--
 .../java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9f73f1b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
index b87a3a7..a3af7ce 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
@@ -123,7 +123,7 @@ public class AliyunOSSInputStream extends FSInputStream {
 }
 
 if (statistics != null && byteRead >= 0) {
-  statistics.incrementBytesRead(1);
+  statistics.incrementBytesRead(byteRead);
 }
 return byteRead;
   }
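
In this read path byteRead is the count of bytes delivered to the caller, so the statistics must grow by that count; bumping them by 1 per call under-reports bulk reads by roughly the buffer size. A hedged sketch of the intended accounting, with a stand-in Stats interface rather than the real Hadoop statistics class:

import java.io.IOException;
import java.io.InputStream;

// Illustrative accounting for a bulk read, not the Aliyun class itself.
public final class ReadStatsSketch {
  interface Stats { void incrementBytesRead(long n); }

  static int read(InputStream in, Stats statistics,
                  byte[] buf, int off, int len) throws IOException {
    int bytesRead = in.read(buf, off, len);       // may return up to 'len' bytes
    if (statistics != null && bytesRead > 0) {
      statistics.incrementBytesRead(bytesRead);   // was incrementBytesRead(1)
    }
    return bytesRead;
  }
}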





[2/2] hadoop git commit: YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by Sidharta Seethana

2016-10-13 Thread cdouglas
YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by 
Sidharta Seethana

(cherry picked from commit cf3f43e95bf46030875137fc36da5c1fbe14250d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0f2681b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0f2681b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0f2681b

Branch: refs/heads/branch-2
Commit: f0f2681b4fe2d99408f85fdad90a1557c4c62f14
Parents: ad69baf
Author: Chris Douglas 
Authored: Thu Oct 13 20:47:49 2016 -0700
Committer: Chris Douglas 
Committed: Thu Oct 13 20:49:50 2016 -0700

--
 .../impl/container-executor.c   | 11 ++---
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 42 
 .../test/test-container-executor.c  | 51 
 4 files changed, 79 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0f2681b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 4b69c6a..2b9f060 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -415,9 +415,9 @@ int change_user(uid_t user, gid_t group) {
   return 0;
 }
 
-
-static int is_feature_enabled(const char* feature_key, int default_value) {
-char *enabled_str = get_value(feature_key, _cfg);
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg) {
+char *enabled_str = get_value(feature_key, cfg);
 int enabled = default_value;
 
 if (enabled_str != NULL) {
@@ -441,15 +441,14 @@ static int is_feature_enabled(const char* feature_key, 
int default_value) {
 }
 }
 
-
 int is_docker_support_enabled() {
 return is_feature_enabled(DOCKER_SUPPORT_ENABLED_KEY,
-DEFAULT_DOCKER_SUPPORT_ENABLED);
+DEFAULT_DOCKER_SUPPORT_ENABLED, _cfg);
 }
 
 int is_tc_support_enabled() {
 return is_feature_enabled(TC_SUPPORT_ENABLED_KEY,
-DEFAULT_TC_SUPPORT_ENABLED);
+DEFAULT_TC_SUPPORT_ENABLED, _cfg);
 }
 
 char* check_docker_binary(char *docker_binary) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0f2681b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index c4a411d..2b3a3ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -258,6 +258,10 @@ int check_dir(const char* npath, mode_t st_mode, mode_t 
desired,
 int create_validate_dir(const char* npath, mode_t perm, const char* path,
int finalComponent);
 
+/** Check if a feature is enabled in the specified configuration. */
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg);
+
 /** Check if tc (traffic control) support is enabled in configuration. */
 int is_tc_support_enabled();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0f2681b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 

[1/2] hadoop git commit: YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by Sidharta Seethana

2016-10-13 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ad69baf6a -> f0f2681b4
  refs/heads/trunk 0a85d0798 -> cf3f43e95


YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by 
Sidharta Seethana


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf3f43e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf3f43e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf3f43e9

Branch: refs/heads/trunk
Commit: cf3f43e95bf46030875137fc36da5c1fbe14250d
Parents: 0a85d07
Author: Chris Douglas 
Authored: Thu Oct 13 20:47:49 2016 -0700
Committer: Chris Douglas 
Committed: Thu Oct 13 20:49:07 2016 -0700

--
 .../impl/container-executor.c   | 11 ++---
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 42 
 .../test/test-container-executor.c  | 51 
 4 files changed, 79 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index a9a7e96..8a995b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -422,9 +422,9 @@ int change_user(uid_t user, gid_t group) {
   return 0;
 }
 
-
-static int is_feature_enabled(const char* feature_key, int default_value) {
-char *enabled_str = get_value(feature_key, _cfg);
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg) {
+char *enabled_str = get_value(feature_key, cfg);
 int enabled = default_value;
 
 if (enabled_str != NULL) {
@@ -448,15 +448,14 @@ static int is_feature_enabled(const char* feature_key, 
int default_value) {
 }
 }
 
-
 int is_docker_support_enabled() {
 return is_feature_enabled(DOCKER_SUPPORT_ENABLED_KEY,
-DEFAULT_DOCKER_SUPPORT_ENABLED);
+DEFAULT_DOCKER_SUPPORT_ENABLED, _cfg);
 }
 
 int is_tc_support_enabled() {
 return is_feature_enabled(TC_SUPPORT_ENABLED_KEY,
-DEFAULT_TC_SUPPORT_ENABLED);
+DEFAULT_TC_SUPPORT_ENABLED, _cfg);
 }
 
 char* check_docker_binary(char *docker_binary) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 5c17b29..8ad5d47 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -264,6 +264,10 @@ int check_dir(const char* npath, mode_t st_mode, mode_t 
desired,
 int create_validate_dir(const char* npath, mode_t perm, const char* path,
int finalComponent);
 
+/** Check if a feature is enabled in the specified configuration. */
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg);
+
 /** Check if tc (traffic control) support is enabled in configuration. */
 int is_tc_support_enabled();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 

[2/2] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2016-10-13 Thread subru
YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def82114
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def82114
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def82114

Branch: refs/heads/YARN-2915
Commit: def821140fcbb8ddb1bdf447a73e777c591c3704
Parents: 0bf6bbb
Author: Subru Krishnan 
Authored: Thu Oct 13 17:59:13 2016 -0700
Committer: Subru Krishnan 
Committed: Thu Oct 13 17:59:13 2016 -0700

--
 .../AbstractConfigurableFederationPolicy.java   | 155 +
 .../policies/ConfigurableFederationPolicy.java  |   9 +-
 .../FederationPolicyInitializationContext.java  |  37 +-
 ...ionPolicyInitializationContextValidator.java |  28 +-
 .../policies/FederationPolicyManager.java   |  59 +-
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |  47 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |  85 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  25 +-
 .../LocalityMulticastAMRMProxyPolicy.java   | 583 +++
 .../policies/amrmproxy/package-info.java|   1 -
 .../policies/dao/WeightedPolicyInfo.java| 180 +++---
 .../federation/policies/dao/package-info.java   |   1 -
 .../policies/exceptions/package-info.java   |   1 -
 .../federation/policies/package-info.java   |   1 -
 .../policies/router/AbstractRouterPolicy.java   |  47 ++
 .../router/BaseWeightedRouterPolicy.java| 150 -
 .../policies/router/FederationRouterPolicy.java |   5 +-
 .../policies/router/LoadBasedRouterPolicy.java  |  36 +-
 .../policies/router/PriorityRouterPolicy.java   |  19 +-
 .../router/UniformRandomRouterPolicy.java   |  28 +-
 .../router/WeightedRandomRouterPolicy.java  |  32 +-
 .../policies/router/package-info.java   |   1 -
 .../resolver/AbstractSubClusterResolver.java|   4 +-
 .../policies/BaseFederationPoliciesTest.java|  28 +-
 ...ionPolicyInitializationContextValidator.java |  25 +-
 .../TestBroadcastAMRMProxyFederationPolicy.java | 112 
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 566 ++
 .../router/TestLoadBasedRouterPolicy.java   |  18 +-
 .../router/TestPriorityRouterPolicy.java|  15 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  35 +-
 .../utils/FederationPoliciesTestUtil.java   |  64 ++
 .../src/test/resources/nodes|   6 +-
 32 files changed, 1950 insertions(+), 453 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def82114/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
new file mode 100644
index 000..4cb9bbe
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.NoActiveSubclustersException;
+import 

[1/2] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2016-10-13 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 0bf6bbb32 -> def821140


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def82114/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
index e57709f..5de749f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.yarn.server.federation.policies.router;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -30,34 +30,27 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 
-import java.util.Map;
-
 /**
  * This implements a simple load-balancing policy. The policy "weights" are
  * binary 0/1 values that enable/disable each sub-cluster, and the policy picks
  * the sub-cluster with the least load to forward this application.
  */
-public class LoadBasedRouterPolicy
-extends BaseWeightedRouterPolicy {
-
-  private static final Log LOG =
-  LogFactory.getLog(LoadBasedRouterPolicy.class);
+public class LoadBasedRouterPolicy extends AbstractRouterPolicy {
 
   @Override
-  public void reinitialize(FederationPolicyInitializationContext
-  federationPolicyContext)
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
   throws FederationPolicyInitializationException {
 
 // remember old policyInfo
 WeightedPolicyInfo tempPolicy = getPolicyInfo();
 
-//attempt new initialization
-super.reinitialize(federationPolicyContext);
+// attempt new initialization
+super.reinitialize(policyContext);
 
-//check extra constraints
+// check extra constraints
 for (Float weight : getPolicyInfo().getRouterPolicyWeights().values()) {
   if (weight != 0 && weight != 1) {
-//reset to old policyInfo if check fails
+// reset to old policyInfo if check fails
 setPolicyInfo(tempPolicy);
 throw new FederationPolicyInitializationException(
 this.getClass().getCanonicalName()
@@ -69,18 +62,16 @@ public class LoadBasedRouterPolicy
 
   @Override
   public SubClusterId getHomeSubcluster(
-  ApplicationSubmissionContext appSubmissionContext)
-  throws YarnException {
+  ApplicationSubmissionContext appSubmissionContext) throws YarnException {
 
 Map activeSubclusters =
 getActiveSubclusters();
 
-Map weights = getPolicyInfo()
-.getRouterPolicyWeights();
+Map weights =
+getPolicyInfo().getRouterPolicyWeights();
 SubClusterIdInfo chosen = null;
 long currBestMem = -1;
-for (Map.Entry entry :
-activeSubclusters
+for (Map.Entry entry : activeSubclusters
 .entrySet()) {
   SubClusterIdInfo id = new SubClusterIdInfo(entry.getKey());
   if (weights.containsKey(id) && weights.get(id) > 0) {
@@ -95,8 +86,7 @@ public class LoadBasedRouterPolicy
 return chosen.toId();
   }
 
-  private long getAvailableMemory(SubClusterInfo value)
-  throws YarnException {
+  private long getAvailableMemory(SubClusterInfo value) throws YarnException {
 try {
   long mem = -1;
   JSONObject obj = new JSONObject(value.getCapability());

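As the class javadoc says, the weights here act only as a per-sub-cluster on/off switch; among the enabled sub-clusters, getHomeSubcluster keeps the one reporting the most available memory in its capability JSON. The same selection, condensed to plain maps (illustrative, not the patch's types):

import java.util.Map;

// Among enabled ids (weight > 0), keep the one with the largest available
// memory, i.e. the least loaded sub-cluster.
public final class LeastLoadedPicker {
  public static String pick(Map<String, Long> availMemById,
                            Map<String, Float> weights) {
    String chosen = null;
    long best = -1;
    for (Map.Entry<String, Long> e : availMemById.entrySet()) {
      Float w = weights.get(e.getKey());
      if (w != null && w > 0 && e.getValue() > best) {
        best = e.getValue();
        chosen = e.getKey();
      }
    }
    return chosen;   // null when no sub-cluster is enabled
  }
}
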
http://git-wip-us.apache.org/repos/asf/hadoop/blob/def82114/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
 

hadoop git commit: HADOOP-13417. Fix javac and checkstyle warnings in hadoop-auth package.

2016-10-13 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk d9f73f1b7 -> 5a5a72473


HADOOP-13417. Fix javac and checkstyle warnings in hadoop-auth package.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a5a7247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a5a7247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a5a7247

Branch: refs/heads/trunk
Commit: 5a5a724731b74df9eed2de5f3370bcb8023fa2eb
Parents: d9f73f1
Author: Akira Ajisaka 
Authored: Fri Oct 14 14:45:55 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Oct 14 14:45:55 2016 +0900

--
 .../client/AuthenticatorTestCase.java   | 49 
 1 file changed, 29 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a5a7247/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
index 8f35e13..35e40d8 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
@@ -20,14 +20,15 @@ import 
org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.http.HttpResponse;
 import org.apache.http.auth.AuthScope;
 import org.apache.http.auth.Credentials;
+import org.apache.http.client.CredentialsProvider;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.params.AuthPolicy;
 import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.impl.auth.SPNegoSchemeFactory;
-import org.apache.http.impl.client.SystemDefaultHttpClient;
+import org.apache.http.impl.auth.SPNegoScheme;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.util.EntityUtils;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
@@ -53,6 +54,7 @@ import java.net.ServerSocket;
 import java.net.URL;
 import java.security.Principal;
 import java.util.Properties;
+
 import org.junit.Assert;
 
 public class AuthenticatorTestCase {
@@ -241,22 +243,29 @@ public class AuthenticatorTestCase {
 }
   }
 
-  private SystemDefaultHttpClient getHttpClient() {
-final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient();
-httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new 
SPNegoSchemeFactory(true));
- Credentials use_jaas_creds = new Credentials() {
-   public String getPassword() {
- return null;
-   }
-
-   public Principal getUserPrincipal() {
- return null;
-   }
- };
-
- httpClient.getCredentialsProvider().setCredentials(
-   AuthScope.ANY, use_jaas_creds);
- return httpClient;
+  private HttpClient getHttpClient() {
+HttpClientBuilder builder = HttpClientBuilder.create();
+// Register auth scheme
+builder.setDefaultAuthSchemeRegistry(
+s-> httpContext -> new SPNegoScheme(true, true)
+);
+
+Credentials useJaasCreds = new Credentials() {
+  public String getPassword() {
+return null;
+  }
+  public Principal getUserPrincipal() {
+return null;
+  }
+};
+
+CredentialsProvider jaasCredentialProvider
+= new BasicCredentialsProvider();
+jaasCredentialProvider.setCredentials(AuthScope.ANY, useJaasCreds);
+// Set credential provider
+builder.setDefaultCredentialsProvider(jaasCredentialProvider);
+
+return builder.build();
   }
 
   private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest 
request) throws Exception {
@@ -273,7 +282,7 @@ public class AuthenticatorTestCase {
   protected void _testAuthenticationHttpClient(Authenticator authenticator, 
boolean doPost) throws Exception {
 start();
 try {
-  SystemDefaultHttpClient httpClient = getHttpClient();
+  HttpClient httpClient = getHttpClient();
   doHttpClientRequest(httpClient, new HttpGet(getBaseURL()));
 
   // Always do a GET before POST to trigger the SPNego negotiation



hadoop git commit: HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. Contributed by Kihwal Lee.

2016-10-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c7780b066 -> 78777d4a9


HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. 
Contributed by Kihwal Lee.

(cherry picked from commit 9454dc5e8091354cd0a4b8c8aa5f4004529db5d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78777d4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78777d4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78777d4a

Branch: refs/heads/branch-2.8
Commit: 78777d4a96852d6734dbd62217edeec0f862113a
Parents: c7780b0
Author: Kihwal Lee 
Authored: Thu Oct 13 09:02:59 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 09:02:59 2016 -0500

--
 .../hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78777d4a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index b651410..1941b77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -330,7 +330,7 @@ public class NamenodeWebHdfsMethods {
 } else {
   //generate a token
   final Token t = generateDelegationToken(
-  namenode, ugi, userPrincipal.getName());
+  namenode, ugi, null);
   delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
 }
 final String query = op.toQueryString() + delegationQuery

