Author: rangadi
Date: Fri Feb 13 23:30:25 2009
New Revision: 744282

URL: http://svn.apache.org/viewvc?rev=744282&view=rev
Log:
HADOOP-5224. HDFS append() is disabled. It throws
UnsupportedOperationException. Committed only to 0.19.x. (Raghu Angadi)

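On 0.19.x a client that calls append() now gets an unchecked
UnsupportedOperationException at run time. As a minimal sketch of a
caller-side guard (the path, payload, and overwrite fallback below are
hypothetical, not part of this commit):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendFallback {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/example.log");   // hypothetical path
        byte[] data = "more data\n".getBytes();
        try {
          FSDataOutputStream out = fs.append(file); // throws on 0.19.x
          out.write(data);
          out.close();
        } catch (UnsupportedOperationException e) {
          // append() is disabled: rewrite the file instead (a policy
          // choice made for this sketch, not a recommendation).
          FSDataOutputStream out = fs.create(file, true); // overwrite
          out.write(data);
          out.close();
        }
      }
    }
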
Modified:
    hadoop/core/branches/branch-0.19/CHANGES.txt
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java

Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=744282&r1=744281&r2=744282&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Fri Feb 13 23:30:25 2009
@@ -9,6 +9,9 @@
     HADOOP-5225. Workaround for tmp file handling in HDFS. sync() is
     incomplete as a result. Committed only to 0.19.x. (Raghu Angadi)
 
+    HADOOP-5224. HDFS append() is disabled. It throws
+    UnsupportedOperationException. Committed only to 0.19.x. (Raghu Angadi)
+
   IMPROVEMENTS
 
     HADOOP-4739. Fix spelling and grammar, improve phrasing of some sections in

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=744282&r1=744281&r2=744282&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Feb 13 23:30:25 2009
@@ -175,9 +175,11 @@
   /** This optional operation is not yet supported. */
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
-
-    DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
-    return new FSDataOutputStream(op, statistics, op.getInitialLen());
+    // disable append() in 0.19.x
+    throw new UnsupportedOperationException("HDFS does not support append yet");
+
+    //DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
+    //return new FSDataOutputStream(op, statistics, op.getInitialLen());
   }
 
   public FSDataOutputStream create(Path f, FsPermission permission,
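Since the exception is unchecked, callers get no compile-time signal that
append is off. One way to detect it is a capability probe; a hedged sketch,
assuming the caller may create and delete a scratch path (this helper is
illustrative, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class AppendProbe {
      /** Returns true if fs.append() is usable, false if it is disabled. */
      static boolean appendSupported(FileSystem fs, Path scratch) throws IOException {
        fs.create(scratch, true).close();     // create an empty probe file
        try {
          fs.append(scratch).close();         // 0.19.x throws here
          return true;
        } catch (UnsupportedOperationException e) {
          return false;
        } finally {
          fs.delete(scratch, false);          // remove the probe file
        }
      }
    }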

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=744282&r1=744281&r2=744282&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java Fri Feb 13 23:30:25 2009
@@ -123,6 +123,8 @@
    * Verify that all data exists in file.
    */ 
   public void testSimpleAppend() throws IOException {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -369,6 +371,8 @@
    * Test that appends to files at random offsets.
    */
   public void testComplexAppend() throws IOException {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     initBuffer(fileSize);
     Configuration conf = new Configuration();
     conf.setInt("heartbeat.recheck.interval", 2000);
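The "if (true) return;" guard used here (and throughout TestFileAppend3
below) is the stock way to park a JUnit 3 test: JUnit 3.8 has no @Ignore,
and a bare "return;" would make the rest of the method unreachable code,
which javac rejects. JLS 14.21 deliberately exempts "if" statements with
constant conditions so code can be switched off this way. A minimal
illustration with hypothetical names:

    public class TestParked extends junit.framework.TestCase {
      public void testDisabledFeature() {
        if (true) return;      // temporarily disabled; passes as a no-op
        fail("never reached"); // still "reachable" per JLS 14.21, so this compiles
      }
      // A bare "return;" in place of the guard would be a compile-time
      // error: every statement after it would be unreachable.
    }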

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=744282&r1=744281&r2=744282&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Feb 13 23:30:25 2009
@@ -67,6 +67,8 @@
 
   /** TC1: Append on block boundary. */
   public void testTC1() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
 
@@ -92,6 +94,8 @@
 
   /** TC2: Append on non-block boundary. */
   public void testTC2() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
 
@@ -117,6 +121,8 @@
 
   /** TC5: Only one simultaneous append. */
   public void testTC5() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
 
@@ -144,6 +150,8 @@
 
   /** TC7: Corrupted replicas are present. */
   public void testTC7() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
     System.out.println("p=" + p);
@@ -189,6 +197,8 @@
 
   /** TC11: Racing rename */
   public void testTC11() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
 
@@ -242,6 +252,8 @@
 
   /** TC12: Append to partial CRC chunk */
   public void testTC12() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);
     

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=744282&r1=744281&r2=744282&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestQuota.java Fri Feb 13 23:30:25 2009
@@ -547,6 +547,7 @@
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
       
+      /* NOTE: append() is not supported in 0.19.
       OutputStream out = dfs.append(file2);
       // appending 1 fileLen should succeed
       out.write(new byte[fileLen]);
@@ -578,6 +579,10 @@
       // verify space after partial append
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
+      == end of append test == */
+      
+      // reduce quota for quotaDir1 to account for not appending 
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 3 * fileSpace);
       
       // Test set replication :
       
@@ -586,7 +591,7 @@
       
       // verify that space is reduced by file2Len
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace - file2Len);
       
      // now try to increase the replication and expect an error.
       hasException = false;
@@ -599,7 +604,7 @@
 
       // verify space consumed remains unchanged.
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace - file2Len);
       
       // now increase the quota for quotaDir1 and quotaDir20
       dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
@@ -609,7 +614,7 @@
       dfs.setReplication(file2, (short)(replication+1));
       // verify increase in space
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace + file2Len);
       
     } finally {
       cluster.shutdown();
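The revised expectations follow from the skipped appends: the commented-out
block grew consumed space from 3 * fileSpace to 5 * fileSpace, so with it
disabled only 3 * fileSpace remains, and quotaDir1's space quota is lowered
to 3 * fileSpace in step so the over-quota replication increase still
triggers the expected exception. A worked sketch of the accounting, assuming
fileSpace = fileLen * replication as the variable names in TestQuota suggest:

    // with appends (old expectation):          5 * fileSpace
    // appends skipped (this patch):            3 * fileSpace
    // setReplication(file2, replication - 1):  3 * fileSpace - file2Len
    //   (one fewer replica frees file2Len bytes)
    // setReplication(file2, replication + 1):  3 * fileSpace + file2Len
    //   (one replica above the original adds file2Len bytes)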

