Author: szetszwo
Date: Tue May 29 21:16:49 2012
New Revision: 1343997

URL: http://svn.apache.org/viewvc?rev=1343997&view=rev
Log:
svn merge -c 1343738 from branch-1 for HDFS-3453.

Modified:
    hadoop/common/branches/branch-1.1/   (props changed)
    hadoop/common/branches/branch-1.1/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

Propchange: hadoop/common/branches/branch-1.1/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-1:r1343738

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1343997&r1=1343996&r2=1343997&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Tue May 29 21:16:49 2012
@@ -239,6 +239,9 @@ Release 1.1.0 - unreleased
 
     HADOOP-8329. Build fails with Java 7. (eli)
 
+    HDFS-3453. HDFS 1.x client is not interoperable with pre 1.x server.
+    (Kihwal Lee via suresh)
+    
 Release 1.0.3 - 2012.05.07
 
   NEW FEATURES

Propchange: hadoop/common/branches/branch-1.1/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-1/CHANGES.txt:r1343738

Modified: hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1343997&r1=1343996&r2=1343997&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Tue May 29 21:16:49 2012
@@ -3342,8 +3342,17 @@ public class DFSClient implements FSCons
       computePacketChunkSize(writePacketSize, bytesPerChecksum);
 
       try {
-        namenode.create(
-            src, masked, clientName, overwrite, createParent, replication, blockSize);
+        // Make sure the regular create() is done through the old create().
+        // This is done to ensure that newer clients (post-1.0) can talk to
+        // older clusters (pre-1.0). Older clusters lack the new  create()
+        // method accepting createParent as one of the arguments.
+        if (createParent) {
+          namenode.create(
+            src, masked, clientName, overwrite, replication, blockSize);
+        } else {
+          namenode.create(
+            src, masked, clientName, overwrite, false, replication, blockSize);
+        }
       } catch(RemoteException re) {
         throw re.unwrapRemoteException(AccessControlException.class,
                                        FileAlreadyExistsException.class,
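
For context, the hunk above keeps a 1.x DFSClient interoperable with pre-1.x NameNodes by routing the default createParent=true case through the older create() RPC signature, and only using the newer signature when createParent is false. Below is a minimal, self-contained sketch of that dispatch pattern; it is not the actual Hadoop code, and the names LegacyAwareClient and NameNodeProtocolStub (plus the trimmed-down parameter lists) are hypothetical.

  // Sketch of the old-vs-new RPC signature dispatch used in the diff above.
  // Assumption: NameNodeProtocolStub stands in for the NameNode RPC proxy,
  // with a simplified parameter list (no FsPermission, no checked exceptions).
  public class LegacyAwareClient {

    interface NameNodeProtocolStub {
      // Old signature: pre-1.x servers only know this one; parent
      // directories are always created implicitly.
      void create(String src, String clientName, boolean overwrite,
                  short replication, long blockSize);

      // New signature: adds createParent, unknown to pre-1.x servers.
      void create(String src, String clientName, boolean overwrite,
                  boolean createParent, short replication, long blockSize);
    }

    private final NameNodeProtocolStub namenode;

    LegacyAwareClient(NameNodeProtocolStub namenode) {
      this.namenode = namenode;
    }

    void createFile(String src, String clientName, boolean overwrite,
                    boolean createParent, short replication, long blockSize) {
      if (createParent) {
        // Default case: call the old method so older servers still work;
        // they create parent directories anyway.
        namenode.create(src, clientName, overwrite, replication, blockSize);
      } else {
        // Only the non-default case needs the new method; a pre-1.x server
        // cannot honor createParent=false, so failing there is acceptable.
        namenode.create(src, clientName, overwrite, false,
                        replication, blockSize);
      }
    }
  }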

