Author: jing9 Date: Wed Sep 18 06:33:22 2013 New Revision: 1524308 URL: http://svn.apache.org/r1524308 Log: HDFS-5212. Merge change r1524302 from branch-2.
Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Wed Sep 18 06:33:22 2013 @@ -40,6 +40,7 @@ import org.apache.hadoop.oncrpc.RpcAccep import org.apache.hadoop.oncrpc.RpcCall; import 
org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.jboss.netty.channel.Channel; /** @@ -88,7 +89,8 @@ public class RpcProgramMountd extends Rp if (LOG.isDebugEnabled()) { LOG.debug("MOUNT NULLOP : " + " client: " + client); } - return RpcAcceptedReply.voidReply(out, xid); + return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write( + out); } @Override @@ -155,7 +157,7 @@ public class RpcProgramMountd extends Rp String host = client.getHostName(); mounts.remove(new MountEntry(host, path)); - RpcAcceptedReply.voidReply(out, xid); + RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(out); return out; } @@ -165,7 +167,8 @@ public class RpcProgramMountd extends Rp LOG.debug("MOUNT UMNTALL : " + " client: " + client); } mounts.clear(); - return RpcAcceptedReply.voidReply(out, xid); + return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write( + out); } @Override @@ -190,8 +193,9 @@ public class RpcProgramMountd extends Rp out = MountResponse.writeExportList(out, xid, exports, hostsMatchers); } else { // Invalid procedure - RpcAcceptedReply.voidReply(out, xid, - RpcAcceptedReply.AcceptState.PROC_UNAVAIL); + RpcAcceptedReply.getInstance(xid, + RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( + out); } return out; } Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original) +++ 
hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Wed Sep 18 06:33:22 2013 @@ -49,6 +49,7 @@ import org.apache.hadoop.nfs.nfs3.respon import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.jboss.netty.channel.Channel; /** @@ -293,7 +294,8 @@ class OpenFileCtx { WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); } else { // Handle repeated write requests(same xid or not). // If already replied, send reply again. If not replied, drop the @@ -315,7 +317,8 @@ class OpenFileCtx { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, request.getCount(), request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); } updateLastAccessTime(); @@ -369,7 +372,8 @@ class OpenFileCtx { WccData fileWcc = new WccData(preOpAttr, postOpAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); writeCtx.setReplied(true); } @@ -394,7 +398,8 @@ class OpenFileCtx { WccData fileWcc = new WccData(preOpAttr, postOpAttr); WRITE3Response response = new 
WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); writeCtx.setReplied(true); } @@ -420,7 +425,8 @@ class OpenFileCtx { } updateLastAccessTime(); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); } } @@ -715,7 +721,8 @@ class OpenFileCtx { WccData fileWcc = new WccData(preOpAttr, latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); } } catch (IOException e) { @@ -723,7 +730,8 @@ class OpenFileCtx { + offset + " and length " + data.length, e); if (!writeCtx.getReplied()) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); // Keep stream open. Either client retries or StreamMonitor closes it. 
} @@ -760,8 +768,9 @@ class OpenFileCtx { WccData fileWcc = new WccData(preOpAttr, latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(writeCtx.getChannel(), - response.send(new XDR(), writeCtx.getXid()), writeCtx.getXid()); + Nfs3Utils.writeChannel(writeCtx.getChannel(), response + .writeHeaderAndResponse(new XDR(), writeCtx.getXid(), + new VerifierNone()), writeCtx.getXid()); } } Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Wed Sep 18 06:33:22 2013 @@ -98,7 +98,6 @@ import org.apache.hadoop.nfs.nfs3.respon import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response; -import org.apache.hadoop.nfs.nfs3.response.VoidResponse; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; @@ -108,12 +107,13 @@ import org.apache.hadoop.oncrpc.RpcDenie import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcReply; import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.oncrpc.security.CredentialsSys; import 
org.apache.hadoop.oncrpc.security.Credentials; -import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.CredentialsSys; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.apache.hadoop.oncrpc.security.SysSecurityHandler; -import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.AccessControlException; import org.jboss.netty.channel.Channel; @@ -209,7 +209,7 @@ public class RpcProgramNfs3 extends RpcP if (LOG.isDebugEnabled()) { LOG.debug("NFS NULL"); } - return new VoidResponse(Nfs3Status.NFS3_OK); + return new NFS3Response(Nfs3Status.NFS3_OK); } @Override @@ -1790,9 +1790,10 @@ public class RpcProgramNfs3 extends RpcP + rpcCall.getCredential().getFlavor() + " is not AUTH_SYS or RPCSEC_GSS."); XDR reply = new XDR(); - reply = RpcDeniedReply.voidReply(reply, xid, + RpcDeniedReply rdr = new RpcDeniedReply(xid, RpcReply.ReplyState.MSG_ACCEPTED, - RpcDeniedReply.RejectState.AUTH_ERROR); + RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone()); + rdr.write(reply); return reply; } } @@ -1857,11 +1858,13 @@ public class RpcProgramNfs3 extends RpcP response = commit(xdr, securityHandler, client); } else { // Invalid procedure - RpcAcceptedReply.voidReply(out, xid, - RpcAcceptedReply.AcceptState.PROC_UNAVAIL); + RpcAcceptedReply.getInstance(xid, + RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( + out); } if (response != null) { - out = response.send(out, xid); + // TODO: currently we just return VerifierNone + out = response.writeHeaderAndResponse(out, xid, new VerifierNone()); } return out; Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java Wed Sep 18 06:33:22 2013 @@ -39,6 +39,7 @@ import org.apache.hadoop.nfs.nfs3.reques import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.util.Daemon; import org.jboss.netty.channel.Channel; @@ -118,7 +119,8 @@ public class WriteManager { byte[] data = request.getData().array(); if (data.length < count) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); return; } @@ -155,7 +157,8 @@ public class WriteManager { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, count, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); return; } @@ -182,10 +185,12 @@ public class WriteManager { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + 
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); } else { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid); + Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( + new XDR(), xid, new VerifierNone()), xid); } } Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java Wed Sep 18 06:33:22 2013 @@ -38,6 +38,8 @@ import org.apache.hadoop.oncrpc.RpcReply import org.apache.hadoop.oncrpc.SimpleTcpClient; import org.apache.hadoop.oncrpc.SimpleTcpClientHandler; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; @@ -58,15 +60,9 @@ public class TestOutOfOrderWrite { static XDR create() { XDR request = new XDR(); - RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM, - Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue()); - - // credentials - request.writeInt(0); // auth null - request.writeInt(0); // length zero - // verifier - request.writeInt(0); // auth null - request.writeInt(0); // 
length zero + RpcCall.getInstance(0x8000004c, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, + Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(), + new VerifierNone()).write(request); SetAttr3 objAttr = new SetAttr3(); CREATE3Request createReq = new CREATE3Request(new FileHandle("/"), @@ -78,15 +74,10 @@ public class TestOutOfOrderWrite { static XDR write(FileHandle handle, int xid, long offset, int count, byte[] data) { XDR request = new XDR(); - RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, - Nfs3Constant.NFSPROC3.WRITE.getValue()); + RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, + Nfs3Constant.NFSPROC3.WRITE.getValue(), new CredentialsNone(), + new VerifierNone()).write(request); - // credentials - request.writeInt(0); // auth null - request.writeInt(0); // length zero - // verifier - request.writeInt(0); // auth null - request.writeInt(0); // length zero WRITE3Request write1 = new WRITE3Request(handle, offset, count, WriteStableHow.UNSTABLE, ByteBuffer.wrap(data)); write1.serialize(request); Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java Wed Sep 18 06:33:22 2013 @@ -26,6 +26,8 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Co import org.apache.hadoop.oncrpc.RegistrationClient; import org.apache.hadoop.oncrpc.RpcCall; 
import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.portmap.PortmapRequest; @@ -78,11 +80,8 @@ public class TestPortmapRegister { static void createPortmapXDRheader(XDR xdr_out, int procedure) { // TODO: Move this to RpcRequest - RpcCall.write(xdr_out, 0, 100000, 2, procedure); - xdr_out.writeInt(0); //no auth - xdr_out.writeInt(0); - xdr_out.writeInt(0); - xdr_out.writeInt(0); + RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(), + new VerifierNone()).write(xdr_out); /* xdr_out.putInt(1); //unix auth Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java Wed Sep 18 06:33:22 2013 @@ -27,6 +27,8 @@ import java.net.UnknownHostException; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.VerifierNone; // TODO: convert this to Junit public class TestUdpServer { @@ -82,7 +84,8 @@ public class TestUdpServer { static void createPortmapXDRheader(XDR xdr_out, int procedure) { // Make this a method - RpcCall.write(xdr_out, 0, 100000, 2, procedure); + 
RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(), + new VerifierNone()).write(xdr_out); } static void testGetportMount() { Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1524308&r1=1524307&r2=1524308&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Sep 18 06:33:22 2013 @@ -90,6 +90,9 @@ Release 2.1.1-beta - 2013-09-23 HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang) + HDFS-5212. Refactor RpcMessage and NFS3Response to support different + types of authentication information. (jing9) + OPTIMIZATIONS BUG FIXES