hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 cb9e60e3d -> f043aa953


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f043aa95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f043aa95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f043aa95

Branch: refs/heads/branch-2.8
Commit: f043aa953b3ee263ad8b1d50594edc72ce4ceccb
Parents: cb9e60e
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 13:26:05 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f043aa95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c51b178..14df725 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -246,7 +246,9 @@ class FSDirConcatOp {
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        nodeToRemove.getParent().removeChild(nodeToRemove);
+        // Ensure the nodeToRemove is cleared from snapshot diff list
+        nodeToRemove.getParent().removeChild(nodeToRemove,
+            targetIIP.getLatestSnapshotId());
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f043aa95/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..14c24a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+
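The test body above is truncated by the archive. Purely as an illustration of the scenario the fix targets, and not the committed test, a self-contained sketch might look like the following; the directory layout, file sizes and snapshot names are invented:

// Hypothetical sketch: concat sources under a snapshottable directory, then
// request a snapshot diff. With the fix, removing the concat sources is
// recorded against the latest snapshot; without it, the diff list of the
// parent directory could be corrupted.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class ConcatSnapshotDiffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();

      Path st = new Path("/st");
      hdfs.mkdirs(st);
      hdfs.allowSnapshot(st);

      long blockSize = hdfs.getDefaultBlockSize();
      Path target = new Path(st, "target");
      Path src = new Path(st, "src");
      DFSTestUtil.createFile(hdfs, target, blockSize, (short) 3, 0L);
      DFSTestUtil.createFile(hdfs, src, blockSize, (short) 3, 0L);

      hdfs.createSnapshot(st, "s0");            // snapshot before the concat
      hdfs.concat(target, new Path[] {src});    // src is removed from /st here

      // Diff between s0 and the current state; expected to show the deleted
      // source and the modified target rather than a corrupted diff list.
      SnapshotDiffReport report = hdfs.getSnapshotDiffReport(st, "s0", "");
      System.out.println(report);
    } finally {
      cluster.shutdown();
    }
  }
}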

hadoop git commit: HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.

2018-02-08 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 77c6439c3 -> 050aa531b


HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by 
Yiqun Lin.

(cherry picked from commit 543f3abbee79d7ec70353f0cdda6397ee001324e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/050aa531
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/050aa531
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/050aa531

Branch: refs/heads/branch-2.9
Commit: 050aa531ba7a419f83a4f48b0dac1ffbbdad6d1b
Parents: 77c6439
Author: Yiqun Lin 
Authored: Fri Feb 9 13:57:42 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Feb 9 14:03:13 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++--
 .../src/main/resources/hdfs-default.xml| 10 --
 .../src/site/markdown/HDFSRouterFederation.md  |  2 +-
 .../server/federation/RouterConfigBuilder.java |  6 ++
 .../store/FederationStateStoreTestUtils.java   | 17 +++--
 5 files changed, 28 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/050aa531/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e1d5806..dac9e71 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /**
@@ -1115,7 +1115,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
       FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
       FEDERATION_STORE_PREFIX + "connection.test";
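Because this changes the compiled-in default, clusters that prefer the previous file-based State Store have to select it explicitly. A minimal sketch, assuming only the key name and class names visible in the diff above:

// Hypothetical sketch: pin the RBF State Store driver back to the file-based
// implementation now that ZooKeeper is the default.
import org.apache.hadoop.conf.Configuration;

public class StateStoreDriverOverride {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.federation.router.store.driver.class",
        "org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl");
    System.out.println(conf.get("dfs.federation.router.store.driver.class"));
  }
}

The same value can of course be set in hdfs-site.xml instead of programmatically.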

http://git-wip-us.apache.org/repos/asf/hadoop/blob/050aa531/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index cb4616e..5bbd853 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4614,9 +4614,15 @@
 
   <property>
     <name>dfs.federation.router.store.driver.class</name>
-    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl</value>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
     <description>
-      Class to implement the State Store. By default it uses the local disk.
+      Class to implement the State Store. There are three implementation classes currently
+      being supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+      These implementation classes use the local file, filesystem and ZooKeeper as a backend respectively.
+      By default it uses the ZooKeeper as the default State Store.
     </description>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/050aa531/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index b1e9ac2..713fa7a 100644
--- 

hadoop git commit: HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.

2018-02-08 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9873eb63a -> d49074f0a


HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by 
Yiqun Lin.

(cherry picked from commit 543f3abbee79d7ec70353f0cdda6397ee001324e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d49074f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d49074f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d49074f0

Branch: refs/heads/branch-2
Commit: d49074f0a2099faecc5db622a11d2d1208eb5133
Parents: 9873eb6
Author: Yiqun Lin 
Authored: Fri Feb 9 13:57:42 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Feb 9 14:00:55 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++--
 .../src/main/resources/hdfs-default.xml| 10 --
 .../src/site/markdown/HDFSRouterFederation.md  |  2 +-
 .../server/federation/RouterConfigBuilder.java |  6 ++
 .../store/FederationStateStoreTestUtils.java   | 17 +++--
 5 files changed, 28 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49074f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9160111..61cbece 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /**
@@ -1115,7 +1115,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
       FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
       FEDERATION_STORE_PREFIX + "connection.test";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49074f0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 33fbe91..68a1865 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4625,9 +4625,15 @@
 
   <property>
     <name>dfs.federation.router.store.driver.class</name>
-    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl</value>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
     <description>
-      Class to implement the State Store. By default it uses the local disk.
+      Class to implement the State Store. There are three implementation classes currently
+      being supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+      These implementation classes use the local file, filesystem and ZooKeeper as a backend respectively.
+      By default it uses the ZooKeeper as the default State Store.
     </description>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49074f0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5649755..ebe94a0 100644
--- 

hadoop git commit: HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.

2018-02-08 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 49ab09f00 -> 38febc0ae


HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by 
Yiqun Lin.

(cherry picked from commit 543f3abbee79d7ec70353f0cdda6397ee001324e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38febc0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38febc0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38febc0a

Branch: refs/heads/branch-3.0
Commit: 38febc0ae2c6ee0647c206643a52dfbbe54ff9a2
Parents: 49ab09f
Author: Yiqun Lin 
Authored: Fri Feb 9 13:57:42 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Feb 9 13:59:38 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++--
 .../src/main/resources/hdfs-default.xml| 10 --
 .../src/site/markdown/HDFSRouterFederation.md  |  2 +-
 .../server/federation/RouterConfigBuilder.java |  6 ++
 .../store/FederationStateStoreTestUtils.java   | 17 +++--
 5 files changed, 28 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38febc0a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d7d998b..0049eca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /**
@@ -1237,7 +1237,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
       FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
       FEDERATION_STORE_PREFIX + "connection.test";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38febc0a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index aaf9fd5..cd1437f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4945,9 +4945,15 @@
 
   <property>
     <name>dfs.federation.router.store.driver.class</name>
-    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl</value>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
     <description>
-      Class to implement the State Store. By default it uses the local disk.
+      Class to implement the State Store. There are three implementation classes currently
+      being supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+      These implementation classes use the local file, filesystem and ZooKeeper as a backend respectively.
+      By default it uses the ZooKeeper as the default State Store.
     </description>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38febc0a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index b1e9ac2..713fa7a 100644
--- 

hadoop git commit: HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.

2018-02-08 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk ddec08d7c -> 543f3abbe


HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/543f3abb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/543f3abb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/543f3abb

Branch: refs/heads/trunk
Commit: 543f3abbee79d7ec70353f0cdda6397ee001324e
Parents: ddec08d
Author: Yiqun Lin 
Authored: Fri Feb 9 13:57:42 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Feb 9 13:57:42 2018 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++--
 .../src/main/resources/hdfs-default.xml| 10 --
 .../src/site/markdown/HDFSRouterFederation.md  |  2 +-
 .../server/federation/RouterConfigBuilder.java |  6 ++
 .../store/FederationStateStoreTestUtils.java   | 17 +++--
 5 files changed, 28 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e0b5b85..c0ad4ec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /**
@@ -1275,7 +1275,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
       FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
       FEDERATION_STORE_PREFIX + "connection.test";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 59df122..f6d232e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5085,9 +5085,15 @@
 
   <property>
     <name>dfs.federation.router.store.driver.class</name>
-    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl</value>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
     <description>
-      Class to implement the State Store. By default it uses the local disk.
+      Class to implement the State Store. There are three implementation classes currently
+      being supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+      These implementation classes use the local file, filesystem and ZooKeeper as a backend respectively.
+      By default it uses the ZooKeeper as the default State Store.
     </description>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5649755..ebe94a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ 

hadoop git commit: YARN-7827. Stop and Delete Yarn Service from RM UI fails with HTTP ERROR 404. Contributed by Sunil G

2018-02-08 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1bc03ddf9 -> ddec08d7c


YARN-7827. Stop and Delete Yarn Service from RM UI fails with HTTP ERROR 404. 
Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddec08d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddec08d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddec08d7

Branch: refs/heads/trunk
Commit: ddec08d7ccc8e43492fca2784203bd8af5e968cc
Parents: 1bc03dd
Author: Jian He 
Authored: Thu Feb 8 21:32:02 2018 -0800
Committer: Jian He 
Committed: Thu Feb 8 21:32:40 2018 -0800

--
 .../src/main/webapp/app/adapters/yarn-servicedef.js |  9 ++---
 .../src/main/webapp/app/components/deploy-service.js| 12 +---
 .../src/main/webapp/app/controllers/yarn-app.js |  4 ++--
 .../src/main/webapp/app/controllers/yarn-app/info.js|  4 ++--
 .../main/webapp/app/controllers/yarn-deploy-service.js  | 12 ++--
 .../webapp/app/templates/components/deploy-service.hbs  | 10 ++
 .../src/main/webapp/app/templates/yarn-app.hbs  |  4 ++--
 .../src/main/webapp/app/templates/yarn-app/info.hbs |  4 ++--
 .../src/main/webapp/app/utils/info-seeder.js|  3 ++-
 .../src/main/webapp/config/default-config.js|  2 +-
 10 files changed, 42 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
index 3fb4a81..03685fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
@@ -24,21 +24,24 @@ export default RESTAbstractAdapter.extend({
   restNameSpace: "dashService",
   serverName: "DASH",
 
-  deployService(request) {
+  deployService(request, user) {
     var url = this.buildURL();
+    url += "/?user.name=" + user;
     return this.ajax(url, "POST", {data: request});
   },
 
-  stopService(serviceName) {
+  stopService(serviceName, user) {
     var url = this.buildURL();
     url += "/" + serviceName;
+    url += "/?user.name=" + user;
     var data = {"state": "STOPPED", "name": serviceName};
     return this.ajax(url, "PUT", {data: data});
   },
 
-  deleteService(serviceName) {
+  deleteService(serviceName, user) {
     var url = this.buildURL();
     url += "/" + serviceName;
+    url += "/?user.name=" + user;
    return this.ajax(url, "DELETE", {data: {}});
   }
 });
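The adapter methods above only append a user.name query parameter to the YARN services REST calls made by the UI. A rough, hypothetical Java equivalent of the stopService() call; the host, port, path, service name and user below are placeholders, not values from this patch:

// Hypothetical sketch of the REST call the UI issues to stop a YARN service.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class StopServiceSketch {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint layout; adjust to the cluster's RM address and API path.
    URL url = new URL(
        "http://rm-host:8088/app/v1/services/my-service?user.name=alice");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    byte[] body = "{\"state\": \"STOPPED\", \"name\": \"my-service\"}"
        .getBytes(StandardCharsets.UTF_8);
    try (OutputStream os = conn.getOutputStream()) {
      os.write(body);
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}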

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
index 90e10e5..36895d7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
@@ -27,6 +27,7 @@ export default Ember.Component.extend({
   customServiceDef: '',
   serviceResp: null,
   isLoading: false,
+  userName: '',
 
   actions: {
     showSaveTemplateModal() {
@@ -36,11 +37,11 @@ export default Ember.Component.extend({
     deployService() {
       this.set('serviceResp', null);
      if (this.get('isStandardViewType')) {
-        this.sendAction("deployServiceDef", this.get('serviceDef'));
+        this.sendAction("deployServiceDef", this.get('serviceDef'), this.get('userName'));
      } else {
        try {
          var parsed = JSON.parse(this.get('customServiceDef'));
-          this.sendAction("deployServiceJson", parsed);
+          this.sendAction("deployServiceJson", parsed, this.get('userName'));
        } catch (err) {
          this.set('serviceResp', {type: 'error', message: 'Invalid JSON: ' + err.message});
          throw err;
@@ -148,16 +149,21 @@ export default Ember.Component.extend({
 
   isValidTemplateName: Ember.computed.notEmpty('savedTemplateName'),
 
+  isUserNameGiven: Ember.computed.empty('userName'),
+
+  isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', 'serviceDef.serviceComponents.[]', function () {
 return 

hadoop git commit: HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)

2018-02-08 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 dc57c1d18 -> 53a72f634


HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)

(cherry picked from commit ae90d4dd908cf3f9e9ff26fa8e92f028057a9ca1)

Backport HDFS-13126 by Erik Krogen

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53a72f63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53a72f63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53a72f63

Branch: refs/heads/branch-2.7
Commit: 53a72f63471a2e66c37bd00210114b13b27e8e7c
Parents: dc57c1d
Author: Kihwal Lee 
Authored: Fri Aug 19 15:32:11 2016 -0500
Committer: Konstantin V Shvachko 
Committed: Thu Feb 8 19:09:21 2018 -0800

--
 .../src/main/conf/log4j.properties  | 11 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../datanode/web/webhdfs/WebHdfsHandler.java| 38 +++-
 3 files changed, 44 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53a72f63/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 316c48e..e435c10 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -266,3 +266,14 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 #log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
 #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
+
+# WebHdfs request log on datanodes
+# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
+# direct the log to a separate file.
+#datanode.webhdfs.logger=INFO,console
+#log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
+#log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
+#log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+#log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd
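The entries above are added commented out; as the comment says, they are enabled by uncommenting them and passing -Ddatanode.webhdfs.logger=INFO,HTTPDRFA when the datanode starts. A hypothetical programmatic equivalent using the log4j 1.x API, with a placeholder log path:

// Hypothetical sketch: enable the datanode.webhdfs request logger in code,
// mirroring the commented-out log4j.properties entries above.
import org.apache.log4j.DailyRollingFileAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class EnableWebHdfsRequestLog {
  public static void main(String[] args) throws Exception {
    Logger requestLog = Logger.getLogger("datanode.webhdfs");
    requestLog.setLevel(Level.INFO);
    requestLog.addAppender(new DailyRollingFileAppender(
        new PatternLayout("%d{ISO8601} %m%n"),
        "/var/log/hadoop/hadoop-datanode-webhdfs.log",   // placeholder path
        ".yyyy-MM-dd"));
  }
}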

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53a72f63/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3077906..ade5516 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -29,6 +29,9 @@ Release 2.7.6 - UNRELEASED
 
 HDFS-13120. Snapshot diff could be corrupted after concat. (xyao)
 
+HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)
+Backport HDFS-13126 by Erik Krogen.
+
 Release 2.7.5 - 2017-12-14
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53a72f63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 4d705b0..f510447 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,6 +29,7 @@ import io.netty.handler.codec.http.HttpMethod;
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import io.netty.handler.stream.ChunkedStream;
+import java.net.InetSocketAddress;
 import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -71,11 +72,13 @@ import static 
io.netty.handler.codec.http.HttpResponseStatus.CONTINUE;
 import static io.netty.handler.codec.http.HttpResponseStatus.CREATED;
 import static io.netty.handler.codec.http.HttpResponseStatus.OK;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static 
io.netty.handler.codec.rtsp.RtspResponseStatuses.INTERNAL_SERVER_ERROR;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HDFS_URI_SCHEME;
 import static 

hadoop git commit: Revert "HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao."

2018-02-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7aec9fac4 -> cb9e60e3d


Revert "HDFS-13120. Snapshot diff could be corrupted after concat. Contributed 
by Xiaoyu Yao."

This reverts commit 7aec9fac4a72e7c868c9c1c7b4252a746694c936.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb9e60e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb9e60e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb9e60e3

Branch: refs/heads/branch-2.8
Commit: cb9e60e3d7f45222870ef759ce8e30769e97d15f
Parents: 7aec9fa
Author: Kihwal Lee 
Authored: Thu Feb 8 15:01:26 2018 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 8 15:01:26 2018 -0600

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 ---
 2 files changed, 1 insertion(+), 129 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb9e60e3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 14df725..c51b178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -246,9 +246,7 @@ class FSDirConcatOp {
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        // Ensure the nodeToRemove is cleared from snapshot diff list
-        nodeToRemove.getParent().removeChild(nodeToRemove,
-            targetIIP.getLatestSnapshotId());
+        nodeToRemove.getParent().removeChild(nodeToRemove);
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb9e60e3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 8bd7967..ca53788 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,22 +26,18 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -65,15 +61,11 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1240,122 +1232,4 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
-
-  @Test
-  public void testSnapshotWithConcatException() throws Exception {
-final Path st = new Path("/st");
-hdfs.mkdirs(st);
-hdfs.allowSnapshot(st);
-
-  

hadoop git commit: YARN-7655. Avoid AM preemption caused by RRs for specific nodes or racks. Contributed by Steven Rand.

2018-02-08 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/trunk eb2449d53 -> 1bc03ddf9


YARN-7655. Avoid AM preemption caused by RRs for specific nodes or racks. 
Contributed by Steven Rand.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc03ddf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc03ddf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc03ddf

Branch: refs/heads/trunk
Commit: 1bc03ddf97f3f0e0ecc1b00217438d3c91d29be5
Parents: eb2449d
Author: Yufei Gu 
Authored: Thu Feb 8 12:32:43 2018 -0800
Committer: Yufei Gu 
Committed: Thu Feb 8 12:32:43 2018 -0800

--
 .../scheduler/fair/FSPreemptionThread.java  | 62 +---
 .../fair/TestFairSchedulerPreemption.java   | 55 +
 2 files changed, 96 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc03ddf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index c05bff9..c32565f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -99,7 +99,10 @@ class FSPreemptionThread extends Thread {
* starvation.
* 2. For each {@link ResourceRequest}, iterate through matching
* nodes and identify containers to preempt all on one node, also
-   * optimizing for least number of AM container preemptions.
+   * optimizing for least number of AM container preemptions. Only nodes
+   * that match the locality level specified in the {@link ResourceRequest}
+   * are considered. However, if this would lead to AM preemption, and locality
+   * relaxation is allowed, then the search space is expanded to all nodes.
*
* @param starvedApp starved application for which we are identifying
*   preemption targets
@@ -111,27 +114,21 @@ class FSPreemptionThread extends Thread {
 
 // Iterate through enough RRs to address app's starvation
 for (ResourceRequest rr : starvedApp.getStarvedResourceRequests()) {
+      List<FSSchedulerNode> potentialNodes = scheduler.getNodeTracker()
+          .getNodesByResourceName(rr.getResourceName());
   for (int i = 0; i < rr.getNumContainers(); i++) {
-PreemptableContainers bestContainers = null;
-List potentialNodes = scheduler.getNodeTracker()
-.getNodesByResourceName(rr.getResourceName());
-int maxAMContainers = Integer.MAX_VALUE;
-
-for (FSSchedulerNode node : potentialNodes) {
-  PreemptableContainers preemptableContainers =
-  identifyContainersToPreemptOnNode(
-  rr.getCapability(), node, maxAMContainers);
-
-  if (preemptableContainers != null) {
-// This set is better than any previously identified set.
-bestContainers = preemptableContainers;
-maxAMContainers = bestContainers.numAMContainers;
-
-if (maxAMContainers == 0) {
-  break;
-}
-  }
-} // End of iteration through nodes for one RR
+PreemptableContainers bestContainers =
+identifyContainersToPreemptForOneContainer(potentialNodes, rr);
+
+// Don't preempt AM containers just to satisfy local requests if relax
+// locality is enabled.
+if (bestContainers != null
+&& bestContainers.numAMContainers > 0
+&& !ResourceRequest.isAnyLocation(rr.getResourceName())
+&& rr.getRelaxLocality()) {
+  bestContainers = identifyContainersToPreemptForOneContainer(
+  scheduler.getNodeTracker().getAllNodes(), rr);
+}
 
 if (bestContainers != null) {
   List containers = bestContainers.getAllContainers();
@@ -154,6 +151,29 @@ class FSPreemptionThread extends Thread {
 return containersToPreempt;
   }
 
+  private PreemptableContainers identifyContainersToPreemptForOneContainer(
+  List 

hadoop git commit: Update version to 3.0.1 for release.

2018-02-08 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 4a3e96a6b -> e716b4359


Update version to 3.0.1 for release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e716b435
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e716b435
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e716b435

Branch: refs/heads/branch-3.0.1
Commit: e716b4359b328d25429e5e2b3f3dabd843c1c9d9
Parents: 4a3e96a
Author: Lei Xu 
Authored: Thu Feb 8 12:02:05 2018 -0800
Committer: Lei Xu 
Committed: Thu Feb 8 12:02:05 2018 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 

hadoop git commit: YARN-5428. Allow for specifying the docker client configuration directory. Contributed by Shane Kumpf

2018-02-08 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 996796f10 -> eb2449d53


YARN-5428. Allow for specifying the docker client configuration directory. 
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb2449d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb2449d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb2449d5

Branch: refs/heads/trunk
Commit: eb2449d5398e9ac869bc088e10d838a7f13deac0
Parents: 996796f
Author: Jian He 
Authored: Wed Feb 7 10:59:38 2018 -0800
Committer: Jian He 
Committed: Thu Feb 8 11:35:30 2018 -0800

--
 .../applications/distributedshell/Client.java   |  38 +++-
 .../DockerCredentialTokenIdentifier.java| 159 
 .../yarn/util/DockerClientConfigHandler.java| 183 +++
 .../src/main/proto/yarn_security_token.proto|   5 +
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 .../security/TestDockerClientConfigHandler.java | 129 +
 .../runtime/DockerLinuxContainerRuntime.java|  39 
 .../linux/runtime/docker/DockerCommand.java |  16 ++
 .../runtime/TestDockerContainerRuntime.java | 109 +++
 .../runtime/docker/TestDockerRunCommand.java|   8 +
 .../src/site/markdown/DockerContainers.md   |  13 +-
 11 files changed, 690 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 2aafa94..0aef83f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -225,6 +226,9 @@ public class Client {
   private String flowVersion = null;
   private long flowRunId = 0L;
 
+  // Docker client configuration
+  private String dockerClientConfig = null;
+
   // Command line options
   private Options opts;
 
@@ -368,6 +372,10 @@ public class Client {
 "If container could retry, it specifies max retires");
 opts.addOption("container_retry_interval", true,
 "Interval between each retry, unit is milliseconds");
+opts.addOption("docker_client_config", true,
+"The docker client configuration path. The scheme should be supplied"
++ " (i.e. file:// or hdfs://)."
++ " Only used when the Docker runtime is enabled and requested.");
 opts.addOption("placement_spec", true,
 "Placement specification. Please note, if this option is specified,"
 + " The \"num_containers\" option will be ignored. All requested"
@@ -585,6 +593,9 @@ public class Client {
 "Flow run is not a valid long value", e);
   }
 }
+if (cliParser.hasOption("docker_client_config")) {
+  dockerClientConfig = cliParser.getOptionValue("docker_client_config");
+}
 return true;
   }
 
@@ -884,9 +895,10 @@ public class Client {
 // amContainer.setServiceData(serviceData);
 
 // Setup security tokens
+Credentials rmCredentials = null;
 if (UserGroupInformation.isSecurityEnabled()) {
   // Note: Credentials class is marked as LimitedPrivate for HDFS and 
MapReduce
-  Credentials credentials = new Credentials();
+  rmCredentials = new Credentials();
   String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
   if (tokenRenewer == null || tokenRenewer.length() == 0) {
 throw new IOException(
@@ -895,16 +907,32 @@ public class Client {
 
   // 

hadoop git commit: HADOOP-15214. Make Hadoop compatible with Guava 21.0. Contributed by Igor Dvorzhak

2018-02-08 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8faf0b50d -> 996796f10


HADOOP-15214. Make Hadoop compatible with Guava 21.0.
Contributed by Igor Dvorzhak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/996796f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/996796f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/996796f1

Branch: refs/heads/trunk
Commit: 996796f1048369e0f307f935ba01af64cc751a85
Parents: 8faf0b5
Author: Steve Loughran 
Authored: Thu Feb 8 10:55:54 2018 -0800
Committer: Steve Loughran 
Committed: Thu Feb 8 10:55:54 2018 -0800

--
 .../src/main/java/org/apache/hadoop/util/RunJar.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java   | 4 ++--
 .../apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java  | 3 +--
 3 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 0ae9e47..9dd770c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -38,12 +38,12 @@ import java.util.jar.JarInputStream;
 import java.util.jar.Manifest;
 import java.util.regex.Pattern;
 
-import com.google.common.io.NullOutputStream;
 import org.apache.commons.io.input.TeeInputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IOUtils.NullOutputStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index dfc6872..b6b42544 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.crypto.key.kms.server;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -32,6 +31,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import 
org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
+import org.apache.hadoop.util.StopWatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -555,7 +555,7 @@ public class KMS {
   throws Exception {
 LOG.trace("Entering reencryptEncryptedKeys method.");
 try {
-  final Stopwatch sw = new Stopwatch().start();
+  final StopWatch sw = new StopWatch().start();
   checkNotEmpty(name, "name");
   checkNotNull(jsonPayload, "jsonPayload");
   final UserGroupInformation user = HttpUserGroupInformation.get();
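The change above swaps Guava's Stopwatch for Hadoop's own org.apache.hadoop.util.StopWatch, so the timing code no longer depends on a Guava API that changed by 21.0. A minimal standalone usage sketch of the Hadoop class, not taken from KMS:

// Minimal sketch of org.apache.hadoop.util.StopWatch, the replacement used here.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchSketch {
  public static void main(String[] args) throws InterruptedException {
    StopWatch sw = new StopWatch().start();
    Thread.sleep(100);                            // stand-in for the timed work
    long elapsedMs = sw.now(TimeUnit.MILLISECONDS);
    sw.stop();
    System.out.println("elapsed: " + elapsedMs + " ms");
  }
}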

http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index 01c2038..65de397 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import 

hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 60d82ac61 -> 4a3e96a6b


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a3e96a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a3e96a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a3e96a6

Branch: refs/heads/branch-3.0.1
Commit: 4a3e96a6b9a9879f913767e31e00b009b4ffc3a3
Parents: 60d82ac
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 10:41:34 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3e96a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 6a41cd8..4cc5389 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -253,7 +253,9 @@ class FSDirConcatOp {
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        nodeToRemove.getParent().removeChild(nodeToRemove);
+        // Ensure the nodeToRemove is cleared from snapshot diff list
+        nodeToRemove.getParent().removeChild(nodeToRemove,
+            targetIIP.getLatestSnapshotId());
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3e96a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+
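The test body above is cut off by the archive. For orientation, here is a minimal sketch of the kind of scenario HDFS-13120 guards against, written against the public DistributedFileSystem API. It is not the committed test: the paths, snapshot names and the BLOCKSIZE constant are illustrative, and it assumes the TestSnapshotDeletion fixture fields (hdfs, REPLICATION, seed, LOG) with files sized to full blocks so concat's full-block precondition holds.

    // Sketch only: concat sources that already exist in a snapshot, then
    // exercise snapshot diff and snapshot deletion afterwards.
    final Path st = new Path("/st");
    hdfs.mkdirs(st);
    hdfs.allowSnapshot(st);

    Path target = new Path(st, "target");
    Path src0 = new Path(st, "src0");
    Path src1 = new Path(st, "src1");
    DFSTestUtil.createFile(hdfs, target, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, src0, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, src1, BLOCKSIZE, REPLICATION, seed);

    hdfs.createSnapshot(st, "s0");
    // concat removes src0/src1 from /st; before this fix the removal was not
    // recorded against snapshot s0, leaving the directory's diff inconsistent.
    hdfs.concat(target, new Path[] {src0, src1});
    hdfs.createSnapshot(st, "s1");

    SnapshotDiffReport report = hdfs.getSnapshotDiffReport(st, "s0", "s1");
    LOG.info("diff between s0 and s1: {}", report);

    // Deleting the older snapshot used to trip over the corrupted diff list.
    hdfs.deleteSnapshot(st, "s0");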

hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 6ea2a9389 -> dc57c1d18


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc57c1d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc57c1d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc57c1d1

Branch: refs/heads/branch-2.7
Commit: dc57c1d18ebf8ffd14eadb8627d7671bf12ea627
Parents: 6ea2a93
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 10:38:34 2018 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 125 +++
 3 files changed, 130 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc57c1d1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index df62491..3077906 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -27,6 +27,8 @@ Release 2.7.6 - UNRELEASED
 HDFS-12371. BlockVerificationFailures and BlocksVerified show up as 0
 in Datanode JMX. (Hanisha Koneru via Kihwal Lee, shv).
 
+HDFS-13120. Snapshot diff could be corrupted after concat. (xyao)
+
 Release 2.7.5 - 2017-12-14
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc57c1d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index e7c9738..6f7bb4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -230,7 +230,9 @@ class FSDirConcatOp {
 for (INodeFile nodeToRemove : srcList) {
   if(nodeToRemove != null) {
 nodeToRemove.setBlocks(null);
-nodeToRemove.getParent().removeChild(nodeToRemove);
+// Ensure the nodeToRemove is cleared from snapshot diff list
+nodeToRemove.getParent().removeChild(nodeToRemove,
+targetIIP.getLatestSnapshotId());
 fsd.getINodeMap().remove(nodeToRemove);
 count++;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc57c1d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index fc6c610..658d076 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -25,8 +25,10 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang.math.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -60,11 +63,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests 

hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5f6806d90 -> 7aec9fac4


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7aec9fac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7aec9fac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7aec9fac

Branch: refs/heads/branch-2.8
Commit: 7aec9fac4a72e7c868c9c1c7b4252a746694c936
Parents: 5f6806d
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 10:23:35 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7aec9fac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c51b178..14df725 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -246,7 +246,9 @@ class FSDirConcatOp {
 for (INodeFile nodeToRemove : srcList) {
   if(nodeToRemove != null) {
 nodeToRemove.clearBlocks();
-nodeToRemove.getParent().removeChild(nodeToRemove);
+// Ensure the nodeToRemove is cleared from snapshot diff list
+nodeToRemove.getParent().removeChild(nodeToRemove,
+targetIIP.getLatestSnapshotId());
 fsd.getINodeMap().remove(nodeToRemove);
 count++;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7aec9fac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+

hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 3487f2b72 -> 77c6439c3


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77c6439c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77c6439c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77c6439c

Branch: refs/heads/branch-2.9
Commit: 77c6439c3387037e2e62812e52cabac1b06095bd
Parents: 3487f2b
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 10:20:55 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77c6439c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c51b178..14df725 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -246,7 +246,9 @@ class FSDirConcatOp {
 for (INodeFile nodeToRemove : srcList) {
   if(nodeToRemove != null) {
 nodeToRemove.clearBlocks();
-nodeToRemove.getParent().removeChild(nodeToRemove);
+// Ensure the nodeToRemove is cleared from snapshot diff list
+nodeToRemove.getParent().removeChild(nodeToRemove,
+targetIIP.getLatestSnapshotId());
 fsd.getINodeMap().remove(nodeToRemove);
 count++;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77c6439c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+

hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 bef769e43 -> 49ab09f00


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.

(cherry picked from commit 8faf0b50d435039f69ea35f592856ca04d378809)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49ab09f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49ab09f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49ab09f0

Branch: refs/heads/branch-3.0
Commit: 49ab09f00b7bcc70a82735130b2eca8aed2d6676
Parents: bef769e
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 10:18:35 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ab09f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 6a41cd8..4cc5389 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -253,7 +253,9 @@ class FSDirConcatOp {
 for (INodeFile nodeToRemove : srcList) {
   if(nodeToRemove != null) {
 nodeToRemove.clearBlocks();
-nodeToRemove.getParent().removeChild(nodeToRemove);
+// Ensure the nodeToRemove is cleared from snapshot diff list
+nodeToRemove.getParent().removeChild(nodeToRemove,
+targetIIP.getLatestSnapshotId());
 fsd.getINodeMap().remove(nodeToRemove);
 count++;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49ab09f0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+

hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk f491f717e -> 8faf0b50d


HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by 
Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8faf0b50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8faf0b50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8faf0b50

Branch: refs/heads/trunk
Commit: 8faf0b50d435039f69ea35f592856ca04d378809
Parents: f491f71
Author: Xiaoyu Yao 
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 08:59:48 2018 -0800

--
 .../hdfs/server/namenode/FSDirConcatOp.java |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 2 files changed, 129 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8faf0b50/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 6a41cd8..4cc5389 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -253,7 +253,9 @@ class FSDirConcatOp {
 for (INodeFile nodeToRemove : srcList) {
   if(nodeToRemove != null) {
 nodeToRemove.clearBlocks();
-nodeToRemove.getParent().removeChild(nodeToRemove);
+// Ensure the nodeToRemove is cleared from snapshot diff list
+nodeToRemove.getParent().removeChild(nodeToRemove,
+targetIIP.getLatestSnapshotId());
 fsd.getINodeMap().remove(nodeToRemove);
 count++;
   }
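For readers unfamiliar with the snapshot bookkeeping this one-line change touches: a snapshottable directory keeps, besides its current children, per-snapshot diff records of children created and deleted since each snapshot. Passing the latest snapshot id into removeChild puts the concat'ed sources into that deleted record instead of letting them vanish without trace. A deliberately simplified toy model of the difference (not the NameNode's DirectoryWithSnapshotFeature; it ignores created-lists, quotas and inode references):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class ToyDir {
      private final List<String> children = new ArrayList<>();
      // snapshot id -> children that existed at that snapshot but were removed later
      private final Map<Integer, List<String>> deletedSince = new HashMap<>();

      // Pre-fix behaviour: drop the child with no snapshot context, so a later
      // snapshot read, diff or deletion has no idea the child ever existed.
      void removeChild(String child) {
        children.remove(child);
      }

      // Post-fix behaviour: record the removal against the latest snapshot,
      // keeping the child visible when that snapshot is read, diffed or deleted.
      void removeChild(String child, int latestSnapshotId) {
        if (children.remove(child) && latestSnapshotId >= 0) {
          deletedSince.computeIfAbsent(latestSnapshotId, k -> new ArrayList<>())
              .add(child);
        }
      }

      // Very rough snapshot view: current children plus those deleted afterwards.
      List<String> childrenInSnapshot(int snapshotId) {
        List<String> view = new ArrayList<>(children);
        view.addAll(deletedSince.getOrDefault(snapshotId, Collections.emptyList()));
        return view;
      }
    }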

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8faf0b50/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
 // make sure bar has been cleaned from inodeMap
 Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+final Path st = new Path("/st");
+hdfs.mkdirs(st);
+hdfs.allowSnapshot(st);
+
+Path[] files = new Path[3];
+for (int i = 0; i < 3; i++) {
+  files[i] = 

hadoop git commit: HADOOP-14920. KMSClientProvider won't work with KMS delegation token retrieved from non-Java client. Contributed by Xiaoyu Yao.

2018-02-08 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8f10d0209 -> 5f6806d90


HADOOP-14920. KMSClientProvider won't work with KMS delegation token retrieved 
from non-Java client. Contributed by Xiaoyu Yao.

(cherry picked from commit 2b08a1fc644904a37545107666efc25b3552542d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f6806d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f6806d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f6806d9

Branch: refs/heads/branch-2.8
Commit: 5f6806d906659900e0a9cdaa307aa50e96220538
Parents: 8f10d02
Author: Xiaoyu Yao 
Authored: Fri Oct 6 10:12:24 2017 -0700
Committer: Xiaoyu Yao 
Committed: Thu Feb 8 09:23:56 2018 -0800

--
 .../DelegationTokenAuthenticationHandler.java   |  5 +-
 .../web/DelegationTokenAuthenticator.java   |  1 +
 .../delegation/web/DelegationTokenManager.java  | 15 +++-
 ...tionTokenAuthenticationHandlerWithMocks.java | 72 ++--
 4 files changed, 70 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6806d9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index c23a94f..caf3172 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -236,8 +236,11 @@ public abstract class DelegationTokenAuthenticationHandler
   }
   String renewer = ServletUtils.getParameter(request,
   KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
+  String service = ServletUtils.getParameter(request,
+  KerberosDelegationTokenAuthenticator.SERVICE_PARAM);
   try {
-Token dToken = tokenManager.createToken(requestUgi, renewer);
+Token dToken = tokenManager.createToken(requestUgi, renewer,
+service);
 map = delegationTokenToJSON(dToken);
   } catch (IOException ex) {
 throw new AuthenticationException(ex.toString(), ex);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6806d9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 2d60d4a..509c6ef 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -66,6 +66,7 @@ public abstract class DelegationTokenAuthenticator implements 
Authenticator {
   public static final String DELEGATION_PARAM = "delegation";
   public static final String TOKEN_PARAM = "token";
   public static final String RENEWER_PARAM = "renewer";
+  public static final String SERVICE_PARAM = "service";
   public static final String DELEGATION_TOKEN_JSON = "Token";
   public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
   public static final String RENEW_DELEGATION_TOKEN_JSON = "long";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6806d9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
index b1a8d48..fd19b67 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
+++ 
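The DelegationTokenManager.java portion of this patch is truncated above. As a hedged guess at what the new three-argument overload (visible in the handler hunk) does, it presumably applies the caller-supplied service to the freshly created token, so a non-Java client that cannot compute the token service itself still receives a usable token. The signature and body below are assumptions, not the committed code (assumed imports: org.apache.hadoop.io.Text, org.apache.hadoop.security.UserGroupInformation, org.apache.hadoop.security.token.Token, java.io.IOException):

    // Hedged sketch, not the actual DelegationTokenManager change.
    public Token<?> createToken(UserGroupInformation ugi, String renewer,
        String service) throws IOException {
      Token<?> token = createToken(ugi, renewer);   // existing two-arg overload
      if (service != null && !service.isEmpty()) {
        // e.g. "kmshost:9600", taken from the optional ?service= query parameter
        // read in DelegationTokenAuthenticationHandler above.
        token.setService(new Text(service));
      }
      return token;
    }

With that in place, a REST client can simply append &service=host:port to the usual op=GETDELEGATIONTOKEN&renewer=... request issued through DelegationTokenAuthenticator, instead of relying on the Java client to fill the service in afterwards.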

[2/2] hadoop git commit: HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.

2018-02-08 Thread brahma
HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one 
namenode is up. Contributed by Jianfei Jiang.

(cherry picked from commit 9873eb63a7525301ab601a0ae65f7e615d1a6bce)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3487f2b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3487f2b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3487f2b7

Branch: refs/heads/branch-2.9
Commit: 3487f2b722654e3113559c8e22262b51757902e9
Parents: 52281fd
Author: Brahma Reddy Battula 
Authored: Thu Feb 8 18:33:11 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Feb 8 18:44:34 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/HAUtil.java |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 +---
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 ++-
 4 files changed, 601 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3487f2b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 043e087..67fdc04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import 
org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -320,6 +321,7 @@ public class HAUtil {
*/
   public static boolean isAtLeastOneActive(List namenodes)
   throws IOException {
+List exceptions = new ArrayList<>();
 for (ClientProtocol namenode : namenodes) {
   try {
 namenode.getFileInfo("/");
@@ -329,10 +331,15 @@ public class HAUtil {
 if (cause instanceof StandbyException) {
   // This is expected to happen for a standby NN.
 } else {
-  throw re;
+  exceptions.add(re);
 }
+  } catch (IOException ioe) {
+exceptions.add(ioe);
   }
 }
+if(!exceptions.isEmpty()){
+  throw MultipleIOException.createIOException(exceptions);
+}
 return false;
   }
 }
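Net effect of the HAUtil change: instead of aborting on the first unexpected failure, the probe keeps checking the remaining namenodes and, if no active one is found, throws a single aggregated exception, so a DFSAdmin command no longer reports an ambiguous result when only one namenode is down. A short sketch of how a caller can surface the individual failures; it assumes MultipleIOException.getExceptions() from org.apache.hadoop.io.MultipleIOException and a List<ClientProtocol> namenodes as in the hunk above:

    try {
      if (!HAUtil.isAtLeastOneActive(namenodes)) {
        System.err.println("no active namenode found");
      }
    } catch (MultipleIOException e) {
      // Two or more namenodes failed for reasons other than being standby.
      for (IOException cause : e.getExceptions()) {
        System.err.println("namenode probe failed: " + cause);
      }
    } catch (IOException e) {
      // A single unexpected failure is typically returned unwrapped by
      // MultipleIOException.createIOException.
      System.err.println("namenode probe failed: " + e);
    }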

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3487f2b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 50e9420..e724db7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4265,7 +4265,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
 
   void setBalancerBandwidth(long bandwidth) throws IOException {
-checkOperation(OperationCategory.UNCHECKED);
+checkOperation(OperationCategory.WRITE);
 checkSuperuserPrivilege();
 getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3487f2b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1264fa0..1e877ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -49,7 +48,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import 

[1/2] hadoop git commit: HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.

2018-02-08 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b7f7fb003 -> 9873eb63a
  refs/heads/branch-2.9 52281fd89 -> 3487f2b72


HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one 
namenode is up. Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9873eb63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9873eb63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9873eb63

Branch: refs/heads/branch-2
Commit: 9873eb63a7525301ab601a0ae65f7e615d1a6bce
Parents: b7f7fb0
Author: Brahma Reddy Battula 
Authored: Thu Feb 8 18:33:11 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Feb 8 18:33:11 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/HAUtil.java |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 +---
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 ++-
 4 files changed, 601 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 043e087..67fdc04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import 
org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -320,6 +321,7 @@ public class HAUtil {
*/
   public static boolean isAtLeastOneActive(List namenodes)
   throws IOException {
+List exceptions = new ArrayList<>();
 for (ClientProtocol namenode : namenodes) {
   try {
 namenode.getFileInfo("/");
@@ -329,10 +331,15 @@ public class HAUtil {
 if (cause instanceof StandbyException) {
   // This is expected to happen for a standby NN.
 } else {
-  throw re;
+  exceptions.add(re);
 }
+  } catch (IOException ioe) {
+exceptions.add(ioe);
   }
 }
+if(!exceptions.isEmpty()){
+  throw MultipleIOException.createIOException(exceptions);
+}
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 50e9420..e724db7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4265,7 +4265,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
 
   void setBalancerBandwidth(long bandwidth) throws IOException {
-checkOperation(OperationCategory.UNCHECKED);
+checkOperation(OperationCategory.WRITE);
 checkSuperuserPrivilege();
 getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1264fa0..1e877ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -49,7 +48,6 @@ import