HDFS-7119. Split error checks in AtomicFileOutputStream#close into separate 
conditions to improve diagnostics. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f9a2222
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f9a2222
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f9a2222

Branch: refs/heads/HDFS-6581
Commit: 9f9a2222a2e142a47537bc57ca98fb07a7a78ad4
Parents: 1861b32
Author: cnauroth <cnaur...@apache.org>
Authored: Thu Sep 25 13:33:37 2014 -0700
Committer: cnauroth <cnaur...@apache.org>
Committed: Thu Sep 25 13:33:37 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/util/AtomicFileOutputStream.java       | 14 ++++++++--
 .../hdfs/util/TestAtomicFileOutputStream.java   | 29 ++++++++++++++++++++
 3 files changed, 43 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
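For orientation, a minimal sketch of the close-time rename logic after this
patch, pulled into a hypothetical helper so it reads in one piece. The names
RenameSketch/replaceOnClose are placeholders; tmpFile and origFile mirror the
stream's temporary and destination File fields, and the two throw sites give
the separate diagnostics that the old combined check could not.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;

class RenameSketch {
  // Sketch only, not the verbatim class: the Windows fallback path of
  // AtomicFileOutputStream#close after HDFS-7119.
  static void replaceOnClose(File tmpFile, File origFile) throws IOException {
    if (!tmpFile.renameTo(origFile)) {
      // On Windows, File#renameTo does not replace an existing file.
      if (origFile.exists() && !origFile.delete()) {
        // Failure mode 1: the old destination could not be removed.
        throw new IOException("Could not delete original file " + origFile);
      }
      try {
        // Failure mode 2: the native rename itself fails; NativeIOException
        // carries the OS-level error text, appended to the message below.
        NativeIO.renameTo(tmpFile, origFile);
      } catch (NativeIOException e) {
        throw new IOException("Could not rename temporary file " + tmpFile
            + " to " + origFile + " due to failure in native rename. "
            + e.toString());
      }
    }
  }
}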


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f9a2222/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 487a547..10dc220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -588,6 +588,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6808. Add command line option to ask DataNode reload configuration.
     (Lei Xu via Colin Patrick McCabe)
 
+    HDFS-7119. Split error checks in AtomicFileOutputStream#close into separate
+    conditions to improve diagnostics. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f9a2222/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java
index 3c7d8d8..a89b8cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java
@@ -26,6 +26,8 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 
 /**
  * A FileOutputStream that has the property that it will only show
@@ -73,9 +75,15 @@ public class AtomicFileOutputStream extends FilterOutputStream {
         boolean renamed = tmpFile.renameTo(origFile);
         if (!renamed) {
           // On windows, renameTo does not replace.
-          if (!origFile.delete() || !tmpFile.renameTo(origFile)) {
-            throw new IOException("Could not rename temporary file " +
-                tmpFile + " to " + origFile);
+          if (origFile.exists() && !origFile.delete()) {
+            throw new IOException("Could not delete original file " + 
origFile);
+          }
+          try {
+            NativeIO.renameTo(tmpFile, origFile);
+          } catch (NativeIOException e) {
+            throw new IOException("Could not rename temporary file " + tmpFile
+              + " to " + origFile + " due to failure in native rename. "
+              + e.toString());
           }
         }
       } else {

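With that change, the caller-facing contract is unchanged: the destination
only ever shows complete contents, published by the rename in close(). A
hedged usage sketch (class name, path, and payload are examples, not from
the patch), following the same cleanup pattern the test below uses:

import java.io.File;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
import org.apache.hadoop.io.IOUtils;

public class AtomicWriteSketch {
  public static void main(String[] args) throws Exception {
    File dstFile = new File("test.txt");   // example destination
    OutputStream fos = null;
    try {
      fos = new AtomicFileOutputStream(dstFile);
      fos.write("some data".getBytes());
      fos.close();   // performs the delete + NativeIO rename shown above
      fos = null;
    } finally {
      IOUtils.cleanup(null, fos);   // best-effort close if close() threw
    }
  }
}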
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f9a2222/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
index fa91311..b9946c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -30,9 +31,13 @@ import java.io.OutputStream;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.hadoop.util.Shell;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import com.google.common.base.Joiner;
 
@@ -44,6 +49,9 @@ public class TestAtomicFileOutputStream {
  private static final File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
   
   private static final File DST_FILE = new File(TEST_DIR, "test.txt");
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
   
   @Before
   public void cleanupTestDir() throws IOException {
@@ -119,6 +127,27 @@ public class TestAtomicFileOutputStream {
         DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()));
   }
 
+  @Test
+  public void testFailToRename() throws IOException {
+    assumeTrue(Shell.WINDOWS);
+    OutputStream fos = null;
+    try {
+      fos = new AtomicFileOutputStream(DST_FILE);
+      fos.write(TEST_STRING.getBytes());
+      FileUtil.setWritable(TEST_DIR, false);
+      exception.expect(IOException.class);
+      exception.expectMessage("failure in native rename");
+      try {
+        fos.close();
+      } finally {
+        fos = null;
+      }
+    } finally {
+      IOUtils.cleanup(null, fos);
+      FileUtil.setWritable(TEST_DIR, true);
+    }
+  }
+
   /**
    * Create a stream that fails to flush at close time
    */

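A note on the new test above: it is guarded with assumeTrue(Shell.WINDOWS)
because the fallback only runs when File#renameTo refuses to replace an
existing destination, which is the Windows behavior, and the ExpectedException
rule is armed only right before close(), presumably so the earlier write() and
setWritable() setup cannot satisfy it. A stripped-down sketch of that
skip-or-expect idiom, with placeholder class and method names not taken from
the patch:

import static org.junit.Assume.assumeTrue;

import java.io.IOException;

import org.apache.hadoop.util.Shell;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class WindowsOnlyExpectationSketch {
  @Rule
  public ExpectedException exception = ExpectedException.none();

  @Test
  public void throwsWithNativeRenameMessage() throws IOException {
    assumeTrue(Shell.WINDOWS);   // skip, rather than fail, on non-Windows hosts
    // Arm the expectation just before the statement that should throw.
    exception.expect(IOException.class);
    exception.expectMessage("failure in native rename");
    throw new IOException("simulated failure in native rename");
  }
}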