HDFS-7263. Snapshot read can reveal future bytes for appended files. Contributed by Tao Luo.
(cherry picked from commit 8bfef590295372a48bd447b1462048008810ee17)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3827a1ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3827a1ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3827a1ac

Branch: refs/heads/sjlee/hdfs-merge
Commit: 3827a1acdbc4f9fec3179dcafa614734b5fa31bc
Parents: 1aa9e34
Author: Tao Luo <tao....@wandisco.com>
Authored: Wed Oct 29 20:20:11 2014 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Aug 12 21:25:12 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  3 +-
 .../snapshot/TestSnapshotFileLength.java        | 42 +++++++++++++++-----
 2 files changed, 34 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3827a1ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index ff65ebc..db06d3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -801,7 +801,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
           }
           int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
           if (locatedBlocks.isLastBlockComplete()) {
-            realLen = (int) Math.min(realLen, locatedBlocks.getFileLength());
+            realLen = (int) Math.min(realLen,
+                locatedBlocks.getFileLength() - pos);
           }
           int result = readBuffer(strategy, off, realLen, corruptedBlockMap);
           

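The one-line change above is the entire fix. `locatedBlocks.getFileLength()` is the length visible through this stream (for a snapshot path, the length recorded when the snapshot was taken), but `realLen` is a byte count relative to the current position `pos`, not an absolute offset. Comparing the two directly makes the clamp too loose whenever `pos > 0`, so a positional read could run past the snapshot length into bytes appended after the snapshot. A worked example of the arithmetic, with illustrative numbers that are not taken from the patch:

    // Suppose a 1024-byte block size, a snapshot taken at length 1025,
    // a reader positioned at pos = 1024, and a caller asking for 4096 bytes.
    long pos = 1024, blockEnd = 2047, fileLength = 1025;
    int len = 4096;

    int realLen = (int) Math.min(len, (blockEnd - pos + 1L)); // 1024 bytes left in the block

    // Old clamp: min(1024, 1025) = 1024, reading 1023 bytes past the snapshot.
    int oldClamp = (int) Math.min(realLen, fileLength);

    // Fixed clamp: min(1024, 1025 - 1024) = 1, only the last snapshot byte.
    int newClamp = (int) Math.min(realLen, fileLength - pos);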
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3827a1ac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 32534f0..98aafc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -21,7 +21,10 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 
 
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.AppendTestUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -55,6 +58,8 @@ public class TestSnapshotFileLength {
 
   @Before
   public void setUp() throws Exception {
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCKSIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCKSIZE);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
                                               .build();
     cluster.waitActive();
@@ -81,40 +86,57 @@ public class TestSnapshotFileLength {
 
     int bytesRead;
     byte[] buffer = new byte[BLOCKSIZE * 8];
+    int origLen = BLOCKSIZE + 1;
+    int toAppend = BLOCKSIZE;
     FSDataInputStream fis = null;
     FileStatus fileStatus = null;
 
     // Create and write a file.
     Path file1 = new Path(sub, file1Name);
-    DFSTestUtil.createFile(hdfs, file1, 0, REPLICATION, SEED);
-    DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
+    DFSTestUtil.appendFile(hdfs, file1, origLen);
 
     // Create a snapshot on the parent directory.
     hdfs.allowSnapshot(sub);
     hdfs.createSnapshot(sub, snapshot1);
 
-    // Write more data to the file.
-    DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
+    Path file1snap1
+        = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
+
+    // Append to the file.
+    FSDataOutputStream out = hdfs.append(file1);
+    try {
+      AppendTestUtil.write(out, 0, toAppend);
+      // Test reading from snapshot of file that is open for append
+      byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
+      assertThat("Wrong data size in snapshot.",
+          dataFromSnapshot.length, is(origLen));
+    } finally {
+      out.close();
+    }
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertThat(fileStatus.getLen(), is((long) BLOCKSIZE * 2));
+    assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
     fis = hdfs.open(file1);
     bytesRead = fis.read(0, buffer, 0, buffer.length);
-    assertThat(bytesRead, is(BLOCKSIZE * 2));
+    assertThat(bytesRead, is(origLen + toAppend));
     fis.close();
 
     // Try to open the file via its snapshot path.
-    Path file1snap1 =
-        SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
     fis = hdfs.open(file1snap1);
     fileStatus = hdfs.getFileStatus(file1snap1);
-    assertThat(fileStatus.getLen(), is((long) BLOCKSIZE));
+    assertThat(fileStatus.getLen(), is((long) origLen));
 
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(0, buffer, 0, buffer.length);
-    assertThat(bytesRead, is(BLOCKSIZE));
+    assertThat(bytesRead, is(origLen));
     fis.close();
+
+    byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs,
+        file1snap1);
+    assertThat("Wrong data size in snapshot.",
+        dataFromSnapshot.length, is(origLen));
   }
 
   /**

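For readers who want the failure mode without walking through the whole test, the scenario distills to the sketch below; the paths, byte counts, and snapshot name are illustrative assumptions, and `hdfs` is a `DistributedFileSystem` handle as in the test:

    // Take a snapshot, then keep appending to the live file.
    Path file = new Path("/sub/file1");
    Path snap = new Path("/sub/.snapshot/s1/file1");

    FSDataOutputStream out = hdfs.append(file);
    out.write(new byte[1024]);
    out.hflush(); // appended bytes are now visible through the live path

    // Before this patch, a read through the snapshot path could return the
    // bytes appended after the snapshot was taken; with the fix, the read
    // stops at the length recorded in the snapshot.
    byte[] fromSnapshot = DFSTestUtil.readFileBuffer(hdfs, snap);
    out.close();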