[1/3] hadoop git commit: HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe)
Repository: hadoop Updated Branches: refs/heads/branch-2.7 9c7585020 -> 53907b66c HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe) (cherry picked from commit ffce9a3413277a69444fcb890460c885de56db69) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51f012ae Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51f012ae Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51f012ae Branch: refs/heads/branch-2.7 Commit: 51f012aec4f219823bb277e48b09a51d64f43977 Parents: 9c75850 Author: Colin Patrick Mccabe Authored: Tue May 5 11:27:36 2015 -0700 Committer: Konstantin V Shvachko Committed: Mon Jul 3 11:32:45 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 38 ++ .../server/namenode/NNThroughputBenchmark.java | 136 +-- 3 files changed, 135 insertions(+), 42 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f012ae/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b8ac822..3b708f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -105,6 +105,9 @@ Release 2.7.4 - UNRELEASED HDFS-8549. Abort the balancer if an upgrade is in progress. (wang) Backport HDFS-11808 by Akira Ajisaka. +HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote +NameNode (Charles Lamb via Colin P. McCabe) + OPTIMIZATIONS HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock. 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51f012ae/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 04040fc..14d007e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha .ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; @@ -96,6 +97,7 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -117,6 +119,7 @@ import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.*; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.apache.hadoop.fs.CreateFlag.CREATE; @@ -1744,6 +1747,41 @@ public class DFSTestUtil { } /** + * Get the NamenodeProtocol RPC proxy for the NN associated with this + * 
DFSClient object + * + * @param nameNodeUri the URI of the NN to get a proxy for. + * + * @return the Namenode RPC proxy associated with this DFSClient object + */ + @VisibleForTesting + public static NamenodeProtocol getNamenodeProtocolProxy(Configuration conf, + URI nameNodeUri, UserGroupInformation ugi) + throws IOException { +return NameNodeProxies.createNonHAProxy(conf, +NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false). +getProxy(); + } + + /** + * Get the RefreshUserMappingsProtocol RPC proxy for the NN associated with + * this DFSClient object + * + * @param nameNodeUri the URI of the NN to get a proxy for. + * + * @return the RefreshUserMappingsProtocol RPC proxy associated with this + * DFSClient object + */ + @VisibleForTesting + public static RefreshUserMappingsProtocol
hadoop git commit: HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe)
Repository: hadoop Updated Branches: refs/heads/branch-2 da410ea02 -> f79b1f019 HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe) (cherry picked from commit ffce9a3413277a69444fcb890460c885de56db69) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f79b1f01 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f79b1f01 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f79b1f01 Branch: refs/heads/branch-2 Commit: f79b1f019001ec344b22a76f87a8b5ab17aa512f Parents: da410ea Author: Colin Patrick Mccabe cmcc...@cloudera.com Authored: Tue May 5 11:27:36 2015 -0700 Committer: Colin Patrick Mccabe cmcc...@cloudera.com Committed: Wed May 6 10:50:33 2015 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 40 ++ .../server/namenode/NNThroughputBenchmark.java | 136 +-- 3 files changed, 137 insertions(+), 42 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f79b1f01/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7dcb58e..739456f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -186,6 +186,9 @@ Release 2.8.0 - UNRELEASED HDFS-8314. Move HdfsServerConstants#IO_FILE_BUFFER_SIZE and SMALL_BUFFER_SIZE to the users. (Li Lu via wheat9) +HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote +NameNode (Charles Lamb via Colin P. McCabe) + OPTIMIZATIONS HDFS-8026. 
Trace FSOutputSummer#writeChecksumChunks rather than http://git-wip-us.apache.org/repos/asf/hadoop/blob/f79b1f01/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index a73f595..b74e66c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -48,6 +48,7 @@ import java.lang.reflect.Modifier; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.Socket; +import java.net.URI; import java.net.URL; import java.net.URLConnection; import java.nio.ByteBuffer; @@ -64,6 +65,7 @@ import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; @@ -129,12 +131,14 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -147,6 +151,7 @@ 
import org.apache.log4j.Level; import org.junit.Assume; import org.mockito.internal.util.reflection.Whitebox; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @@ -1764,6 +1769,41 @@ public class DFSTestUtil { } /** + * Get the NamenodeProtocol RPC proxy for the NN associated with this + * DFSClient object + * + * @param nameNodeUri the URI of the NN to get a proxy for. + * + * @return the Namenode RPC proxy associated with this DFSClient object + */ + @VisibleForTesting + public static NamenodeProtocol getNamenodeProtocolProxy(Configuration conf, + URI nameNodeUri, UserGroupInformation ugi) + throws IOException { +return NameNodeProxies.createNonHAProxy(conf, +NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false). +getProxy(); +
[14/17] hadoop git commit: HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe)
HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffce9a34 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffce9a34 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffce9a34 Branch: refs/heads/HDFS-7240 Commit: ffce9a3413277a69444fcb890460c885de56db69 Parents: e4c3b52 Author: Colin Patrick Mccabe cmcc...@cloudera.com Authored: Tue May 5 11:27:36 2015 -0700 Committer: Colin Patrick Mccabe cmcc...@cloudera.com Committed: Tue May 5 11:34:58 2015 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 40 ++ .../server/namenode/NNThroughputBenchmark.java | 136 +-- 3 files changed, 137 insertions(+), 42 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffce9a34/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c89e6fe..01de9b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -510,6 +510,9 @@ Release 2.8.0 - UNRELEASED HDFS-7758. Retire FsDatasetSpi#getVolumes() and use FsDatasetSpi#getVolumeRefs() instead (Lei (Eddy) Xu via Colin P. McCabe) +HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote +NameNode (Charles Lamb via Colin P. McCabe) + OPTIMIZATIONS HDFS-8026. 
Trace FSOutputSummer#writeChecksumChunks rather than http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffce9a34/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index a8df991..cfee997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -48,6 +48,7 @@ import java.lang.reflect.Modifier; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.Socket; +import java.net.URI; import java.net.URL; import java.net.URLConnection; import java.nio.ByteBuffer; @@ -64,6 +65,7 @@ import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; @@ -129,12 +131,14 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -147,6 +151,7 @@ 
import org.apache.log4j.Level; import org.junit.Assume; import org.mockito.internal.util.reflection.Whitebox; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @@ -1756,6 +1761,41 @@ public class DFSTestUtil { } /** + * Get the NamenodeProtocol RPC proxy for the NN associated with this + * DFSClient object + * + * @param nameNodeUri the URI of the NN to get a proxy for. + * + * @return the Namenode RPC proxy associated with this DFSClient object + */ + @VisibleForTesting + public static NamenodeProtocol getNamenodeProtocolProxy(Configuration conf, + URI nameNodeUri, UserGroupInformation ugi) + throws IOException { +return NameNodeProxies.createNonHAProxy(conf, +NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false). +getProxy(); + } + + /** + * Get the RefreshUserMappingsProtocol RPC proxy for the NN associated with + * this DFSClient object + * + * @param
hadoop git commit: HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe)
Repository: hadoop Updated Branches: refs/heads/trunk e4c3b52c8 -> ffce9a341 HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote NameNode (Charles Lamb via Colin P. McCabe) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffce9a34 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffce9a34 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffce9a34 Branch: refs/heads/trunk Commit: ffce9a3413277a69444fcb890460c885de56db69 Parents: e4c3b52 Author: Colin Patrick Mccabe cmcc...@cloudera.com Authored: Tue May 5 11:27:36 2015 -0700 Committer: Colin Patrick Mccabe cmcc...@cloudera.com Committed: Tue May 5 11:34:58 2015 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 40 ++ .../server/namenode/NNThroughputBenchmark.java | 136 +-- 3 files changed, 137 insertions(+), 42 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffce9a34/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c89e6fe..01de9b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -510,6 +510,9 @@ Release 2.8.0 - UNRELEASED HDFS-7758. Retire FsDatasetSpi#getVolumes() and use FsDatasetSpi#getVolumeRefs() instead (Lei (Eddy) Xu via Colin P. McCabe) +HDFS-7847. Modify NNThroughputBenchmark to be able to operate on a remote +NameNode (Charles Lamb via Colin P. McCabe) + OPTIMIZATIONS HDFS-8026. 
Trace FSOutputSummer#writeChecksumChunks rather than http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffce9a34/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index a8df991..cfee997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -48,6 +48,7 @@ import java.lang.reflect.Modifier; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.Socket; +import java.net.URI; import java.net.URL; import java.net.URLConnection; import java.nio.ByteBuffer; @@ -64,6 +65,7 @@ import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; @@ -129,12 +131,14 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -147,6 +151,7 @@ 
import org.apache.log4j.Level; import org.junit.Assume; import org.mockito.internal.util.reflection.Whitebox; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @@ -1756,6 +1761,41 @@ public class DFSTestUtil { } /** + * Get the NamenodeProtocol RPC proxy for the NN associated with this + * DFSClient object + * + * @param nameNodeUri the URI of the NN to get a proxy for. + * + * @return the Namenode RPC proxy associated with this DFSClient object + */ + @VisibleForTesting + public static NamenodeProtocol getNamenodeProtocolProxy(Configuration conf, + URI nameNodeUri, UserGroupInformation ugi) + throws IOException { +return NameNodeProxies.createNonHAProxy(conf, +NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false). +getProxy(); + } + + /** + * Get the RefreshUserMappingsProtocol RPC proxy