This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit 451b8b8f1675366de3f17762385867eda307f904
Author: Konstantin V Shvachko <[email protected]>
AuthorDate: Wed Dec 19 12:39:57 2018 -0800

    HDFS-14160. [SBN read] ObserverReadInvocationHandler should implement RpcInvocationHandler. Contributed by Konstantin V Shvachko.
---
 .../hdfs/server/namenode/ha/ObserverReadProxyProvider.java | 13 +++++++++++--
 .../org/apache/hadoop/hdfs/server/namenode/TestFsck.java   |  2 +-
 .../apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java  | 10 +++++++---
 .../hadoop/hdfs/server/namenode/ha/TestObserverNode.java   | 13 +++++++++++++
 4 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index e9d53f6..96932a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
@@ -39,9 +38,11 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RpcInvocationHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -239,7 +240,7 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
    *
    * Write requests are always forwarded to the active.
    */
-  private class ObserverReadInvocationHandler implements InvocationHandler {
+  private class ObserverReadInvocationHandler implements RpcInvocationHandler {
 
     @Override
     public Object invoke(Object proxy, final Method method, final Object[] args)
@@ -322,6 +323,14 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
       lastProxy = activeProxy;
       return retVal;
     }
+
+    @Override
+    public void close() throws IOException {}
+
+    @Override
+    public ConnectionId getConnectionId() {
+      return RPC.getConnectionIdForProxy(getCurrentProxy().proxy);
+    }
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 2b5d762..3ef8587 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -166,7 +166,7 @@ public class TestFsck {
   private static final String LINE_SEPARATOR =
       System.getProperty("line.separator");
 
-  static String runFsck(Configuration conf, int expectedErrCode,
+  public static String runFsck(Configuration conf, int expectedErrCode,
       boolean checkErrorCode, String... path)
       throws Exception {
     ByteArrayOutputStream bStream = new ByteArrayOutputStream();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index 1e9418b..16aa924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -175,10 +175,14 @@ public abstract class HATestUtil {
       MiniDFSCluster cluster, Configuration conf,
       Class<P> classFPP, boolean isObserverReadEnabled)
       throws IOException, URISyntaxException {
-    conf = new Configuration(conf);
-    setupHAConfiguration(cluster, conf, 0, classFPP);
+    String logicalName = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
+    URI nnUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + logicalName);
+    conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
+        + "." + logicalName, classFPP.getName());
+    conf.set("fs.defaultFS", nnUri.toString());
+
     DistributedFileSystem dfs = (DistributedFileSystem)
-        FileSystem.get(getLogicalUri(cluster), conf);
+        FileSystem.get(nnUri, conf);
     @SuppressWarnings("unchecked")
     P provider = (P) ((RetryInvocationHandler<?>) Proxy.getInvocationHandler(
         dfs.getClient().getNamenode())).getProxyProvider();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index d8e0cfa..061f6ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.TestFsck;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -339,6 +340,18 @@ public class TestObserverNode {
     Mockito.reset(bmSpy);
   }
 
+  @Test
+  public void testFsckWithObserver() throws Exception {
+    setObserverRead(true);
+
+    dfs.create(testPath, (short)1).close();
+    assertSentTo(0);
+
+    final String result = TestFsck.runFsck(conf, 0, true, "/");
+    LOG.info("result=" + result);
+    assertTrue(result.contains("Status: HEALTHY"));
+  }
+
   private void assertSentTo(int nnIdx) throws IOException {
     assertTrue("Request was not sent to the expected namenode " + nnIdx,
         HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx));

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
