Author: szetszwo
Date: Thu Dec 4 14:18:02 2008
New Revision: 723468
URL: http://svn.apache.org/viewvc?rev=723468&view=rev
Log:
HADOOP-4508. Fix FSDataOutputStream.getPos() for append. (dhruba via szetszwo)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FSDataOutputStream.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
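Before this change, a stream returned by append() started its position counter at 0 even though the file already contained data; with the fix, getPos() starts at the existing file length. A minimal usage sketch of the resulting behaviour, assuming a FileSystem whose append() is supported (e.g. HDFS with append enabled) and an existing non-empty file at a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendGetPosSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);              // assumed to support append()
    Path file = new Path("/tmp/append-getpos-demo.dat"); // hypothetical existing file

    long existingLen = fs.getFileStatus(file).getLen();
    FSDataOutputStream out = fs.append(file);
    try {
      // With HADOOP-4508, getPos() starts at the current file length
      // rather than at 0 when the stream is opened for append.
      System.out.println("getPos=" + out.getPos()
          + ", existing length=" + existingLen);
    } finally {
      out.close();
    }
  }
}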
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=723468&r1=723467&r2=723468&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu Dec 4 14:18:02 2008
@@ -280,6 +280,9 @@
HADOOP-4632. Fix TestJobHistoryVersion to use test.build.dir instead of the
current working directory for scratch space. (Amar Kamat via cdouglas)
+ HADOOP-4508. Fix FSDataOutputStream.getPos() for append. (dhruba via
+ szetszwo)
+
Release 0.19.0 - 2008-11-18
INCOMPATIBLE CHANGES
Modified:
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FSDataOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FSDataOutputStream.java?rev=723468&r1=723467&r2=723468&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FSDataOutputStream.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FSDataOutputStream.java Thu Dec 4 14:18:02 2008
@@ -30,9 +30,11 @@
long position;
public PositionCache(OutputStream out,
- FileSystem.Statistics stats) throws IOException {
+ FileSystem.Statistics stats,
+ long pos) throws IOException {
super(out);
statistics = stats;
+ position = pos;
}
public void write(int b) throws IOException {
@@ -67,7 +69,12 @@
public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats)
throws IOException {
- super(new PositionCache(out, stats));
+ this(out, stats, 0);
+ }
+
+ public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats,
+ long startPosition) throws IOException {
+ super(new PositionCache(out, stats, startPosition));
wrappedStream = out;
}
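The new three-argument constructor lets PositionCache begin counting from an arbitrary offset instead of always from zero. The following is a simplified, self-contained stand-in (not the Hadoop class itself) that illustrates that seeding idea:

import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

class SeededPositionStream extends FilterOutputStream {
  private long position;

  SeededPositionStream(OutputStream out, long startPosition) {
    super(out);
    this.position = startPosition;   // e.g. the existing length of an appended file
  }

  @Override
  public void write(int b) throws IOException {
    out.write(b);
    position++;                      // count every byte written past the seed
  }

  long getPos() {
    return position;
  }

  public static void main(String[] args) throws IOException {
    SeededPositionStream s = new SeededPositionStream(new ByteArrayOutputStream(), 100);
    s.write('a');
    System.out.println(s.getPos()); // 101: the seed plus one byte written
    s.close();
  }
}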
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=723468&r1=723467&r2=723468&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Thu Dec 4 14:18:02 2008
@@ -2024,6 +2024,7 @@
private int recoveryErrorCount = 0; // number of times block recovery failed
private int maxRecoveryErrorCount = 5; // try block recovery 5 times
private volatile boolean appendChunk = false; // appending to existing partial block
+ private long initialFileSize = 0; // at time of file open
private void setLastException(IOException e) {
if (lastException == null) {
@@ -2600,6 +2601,7 @@
LocatedBlock lastBlock, FileStatus stat,
int bytesPerChecksum) throws IOException {
this(src, stat.getBlockSize(), progress, bytesPerChecksum);
+ initialFileSize = stat.getLen(); // length of file when opened
//
// The last partial block of the file has to be filled.
@@ -3155,6 +3157,13 @@
synchronized void setTestFilename(String newname) {
src = newname;
}
+
+ /**
+ * Returns the size of a file as it was when this stream was opened
+ */
+ long getInitialLen() {
+ return initialFileSize;
+ }
}
void reportChecksumFailure(String file, Block blk, DatanodeInfo dn) {
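On the client side, the append path records the file length from the FileStatus it is handed and exposes it through getInitialLen(). A hedged sketch of that pattern, using hypothetical stand-ins rather than the DFSClient internals:

import java.io.ByteArrayOutputStream;

class ClientAppendStreamSketch extends ByteArrayOutputStream {

  /** Minimal stand-in for the FileStatus the real append constructor receives. */
  static class FileMeta {
    final long len;
    FileMeta(long len) { this.len = len; }
    long getLen() { return len; }
  }

  private final long initialFileSize;      // file length at the time of open

  ClientAppendStreamSketch(FileMeta stat) {
    this.initialFileSize = stat.getLen();  // captured once, when the stream is opened
  }

  /** Size of the file as it was when this stream was opened. */
  long getInitialLen() {
    return initialFileSize;
  }

  public static void main(String[] args) {
    ClientAppendStreamSketch s = new ClientAppendStreamSketch(new FileMeta(4096));
    System.out.println(s.getInitialLen()); // 4096, regardless of later writes
  }
}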
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=723468&r1=723467&r2=723468&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Dec 4 14:18:02 2008
@@ -32,6 +32,7 @@
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
import org.apache.hadoop.util.*;
@@ -157,8 +158,8 @@
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
- return new FSDataOutputStream(
- dfs.append(getPathName(f), bufferSize, progress), statistics);
+ DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
+ return new FSDataOutputStream(op, statistics, op.getInitialLen());
}
public FSDataOutputStream create(Path f, FsPermission permission,
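The two-line change above is just wiring: DistributedFileSystem downcasts the raw stream to DFSOutputStream, asks it for its length-at-open, and seeds the FSDataOutputStream wrapper with that value. A self-contained sketch of the wiring, combining simplified stand-ins for both sides (none of these names are the HDFS-internal ones):

import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class AppendWiringSketch {

  /** Stand-in for DFSClient.DFSOutputStream: knows the file size at open time. */
  static class RawAppendStream extends ByteArrayOutputStream {
    private final long initialLen;
    RawAppendStream(long initialLen) { this.initialLen = initialLen; }
    long getInitialLen() { return initialLen; }
  }

  /** Stand-in for FSDataOutputStream(out, stats, startPosition). */
  static class PositionedStream extends FilterOutputStream {
    private long pos;
    PositionedStream(OutputStream out, long startPos) { super(out); this.pos = startPos; }
    @Override public void write(int b) throws IOException { out.write(b); pos++; }
    long getPos() { return pos; }
  }

  /** Stand-in for DistributedFileSystem.append(): seed the wrapper from the raw stream. */
  static PositionedStream append(RawAppendStream raw) {
    return new PositionedStream(raw, raw.getInitialLen());
  }

  public static void main(String[] args) throws IOException {
    PositionedStream out = append(new RawAppendStream(1024)); // pretend 1 KB already on disk
    out.write('x');
    System.out.println(out.getPos());                         // 1025
    out.close();
  }
}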
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=723468&r1=723467&r2=723468&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java Thu Dec 4 14:18:02 2008
@@ -156,6 +156,10 @@
// write the remainder of the file
stm = fs.append(file1);
+
+ // ensure getPos is set to reflect existing size of the file
+ assertTrue(stm.getPos() > 0);
+
System.out.println("Writing " + (fileSize - mid2) + " bytes to file "
+ file1);
stm.write(fileContents, mid2, fileSize - mid2);
System.out.println("Written second part of file");