This is an automated email from the ASF dual-hosted git repository. cliang pushed a commit to branch branch-3.2 in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit e3f3fb1c180188776c4cabc44ca654cf0e440488 Author: Erik Krogen <[email protected]> AuthorDate: Wed Aug 1 09:58:04 2018 -0700 HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang. --- .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java | 14 ++++++++++++++ .../org/apache/hadoop/hdfs/protocol/ClientProtocol.java | 11 +++++++++++ .../protocolPB/ClientNamenodeProtocolTranslatorPB.java | 12 ++++++++++++ .../src/main/proto/ClientNamenodeProtocol.proto | 8 ++++++++ .../java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java | 3 ++- .../server/federation/router/RouterClientProtocol.java | 5 +++++ .../hdfs/server/federation/router/RouterRpcServer.java | 5 +++++ .../ClientNamenodeProtocolServerSideTranslatorPB.java | 13 +++++++++++++ .../hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 5 +++++ 9 files changed, 75 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index ce1083d..b27858d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -3181,4 +3181,18 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, checkOpen(); return new OpenFilesIterator(namenode, tracer, openFilesTypes, path); } + + /** + * A blocking call to wait for the Observer NameNode state ID to reach the + * current client state ID. Current client state ID is given by the client + * alignment context. + * An assumption is that client alignment context has the state ID set at this + * point. This is because ObserverReadProxyProvider sets up the initial state + * ID when it is being created. 
+ * + * @throws IOException + */ + public void msync() throws IOException { + namenode.msync(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 06ce8e5..8e348cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1790,6 +1790,17 @@ public interface ClientProtocol { EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException; /** + * Called by client to wait until the server has reached the state id of the + * client. The client and server state id are given by client side and server + * side alignment context respectively. This can be a blocking call. + * + * @throws IOException + */ + @Idempotent + @ReadOnly + void msync() throws IOException; + + /** * Satisfy the storage policy for a file/directory. * @param path Path of an existing file/directory. * @throws AccessControlException If access is denied. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index e4bca51..f5aa174 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -158,6 +158,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSa import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; @@ -1948,6 +1950,16 @@ public class ClientNamenodeProtocolTranslatorPB implements } @Override + public void msync() throws IOException { + MsyncRequestProto.Builder req = MsyncRequestProto.newBuilder(); + try { + rpcProxy.msync(null, req.build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override public void satisfyStoragePolicy(String src) throws IOException { SatisfyStoragePolicyRequestProto req = SatisfyStoragePolicyRequestProto.newBuilder().setSrc(src).build(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 49ea3f3..55113a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -830,6 +830,12 @@ message ListOpenFilesResponseProto { repeated OpenFilesTypeProto types = 3; } +message MsyncRequestProto { +} + +message MsyncResponseProto { +} + message SatisfyStoragePolicyRequestProto { required string src = 1; } @@ -1024,6 +1030,8 @@ service ClientNamenodeProtocol { returns(GetQuotaUsageResponseProto); rpc listOpenFiles(ListOpenFilesRequestProto) returns(ListOpenFilesResponseProto); + rpc msync(MsyncRequestProto) + returns(MsyncResponseProto); rpc satisfyStoragePolicy(SatisfyStoragePolicyRequestProto) returns(SatisfyStoragePolicyResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java index 34e84fa..57db8ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java @@ -71,7 +71,8 @@ public class TestReadOnly { "getDataEncryptionKey", "getCurrentEditLogTxid", "getEditsFromTxid", - "getQuotaUsage" + "getQuotaUsage", + "msync" ) ); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index f45da3c..7739477 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1533,6 +1533,11 @@ public class RouterClientProtocol implements ClientProtocol { } @Override + public void msync() throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ, false); + } + + @Override public void satisfyStoragePolicy(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.WRITE, false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 165b429..738a06d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1192,6 +1192,11 @@ public class RouterRpcServer extends AbstractService } @Override // ClientProtocol + public void msync() throws IOException { + clientProto.msync(); + } + + @Override // ClientProtocol public void satisfyStoragePolicy(String path) throws IOException { clientProto.satisfyStoragePolicy(path); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index e51529e..8a4d4a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -175,6 +175,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Modify import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; @@ -1894,6 +1896,17 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } @Override + public MsyncResponseProto msync(RpcController controller, + MsyncRequestProto req) throws ServiceException { + try { + server.msync(); + return MsyncResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override public SatisfyStoragePolicyResponseProto satisfyStoragePolicy( RpcController controller, SatisfyStoragePolicyRequestProto request) throws ServiceException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index e8cec39..44dc0f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1367,6 +1367,11 @@ public class NameNodeRpcServer implements NamenodeProtocols { } @Override // ClientProtocol + public void msync() throws IOException { + // TODO : need to be filled up if needed. May be a no-op here. + } + + @Override // ClientProtocol public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) throws IOException { checkNNStartup(); --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
