HDFS-10271. Extra bytes are getting released from reservedSpace for append (Contributed by Brahma Reddy Battula)
(cherry picked from commit 37e4e45390007536cb53f375d529910d65cd5d19) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/713ba2a7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/713ba2a7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/713ba2a7 Branch: refs/heads/branch-2.6 Commit: 713ba2a7327d131e999e7783d499ff7d2c661099 Parents: 74aca34 Author: Vinayakumar B <vinayakum...@apache.org> Authored: Tue Apr 12 11:32:28 2016 +0530 Committer: Vinayakumar B <vinayakum...@apache.org> Committed: Tue Apr 12 11:33:13 2016 +0530 ---------------------------------------------------------------------- .../datanode/fsdataset/impl/FsDatasetImpl.java | 7 ++- .../fsdataset/impl/TestRbwSpaceReservation.java | 59 ++++++++++++++++++++ 2 files changed, 63 insertions(+), 3 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/713ba2a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 7212432..f00416b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -883,7 +883,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> { // construct a RBW replica with the new GS File blkfile = replicaInfo.getBlockFile(); FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume(); - if (v.getAvailable() < estimateBlockLen - 
replicaInfo.getNumBytes()) { + long bytesReserved = estimateBlockLen - replicaInfo.getNumBytes(); + if (v.getAvailable() < bytesReserved) { throw new DiskOutOfSpaceException("Insufficient space for appending to " + replicaInfo); } @@ -891,7 +892,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> { File oldmeta = replicaInfo.getMetaFile(); ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten( replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS, - v, newBlkFile.getParentFile(), Thread.currentThread(), estimateBlockLen); + v, newBlkFile.getParentFile(), Thread.currentThread(), bytesReserved); File newmeta = newReplicaInfo.getMetaFile(); // rename meta file to rbw directory @@ -927,7 +928,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> { // Replace finalized replica by a RBW replica in replicas map volumeMap.add(bpid, newReplicaInfo); - v.reserveSpaceForRbw(estimateBlockLen - replicaInfo.getNumBytes()); + v.reserveSpaceForRbw(bytesReserved); return newReplicaInfo; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/713ba2a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java index ebf2f3b..efe03eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java @@ -373,4 +373,63 @@ public class TestRbwSpaceReservation { return numFailures; } } + + @Test(timeout = 30000) + public void 
testReservedSpaceForAppend() throws Exception { + final short replication = 3; + startCluster(BLOCK_SIZE, replication, -1); + final String methodName = GenericTestUtils.getMethodName(); + final Path file = new Path("/" + methodName + ".01.dat"); + + // Write 1024 bytes to the file and close it. + FSDataOutputStream os = fs.create(file, replication); + os.write(new byte[1024]); + os.close(); + + final Path file2 = new Path("/" + methodName + ".02.dat"); + + // Write 1 byte to the file and keep it open. + FSDataOutputStream os2 = fs.create(file2, replication); + os2.write(new byte[1]); + os2.hflush(); + int expectedFile2Reserved = BLOCK_SIZE - 1; + checkReservedSpace(expectedFile2Reserved); + + // append one byte and verify reservedspace before and after closing + os = fs.append(file); + os.write(new byte[1]); + os.hflush(); + int expectedFile1Reserved = BLOCK_SIZE - 1025; + checkReservedSpace(expectedFile2Reserved + expectedFile1Reserved); + os.close(); + checkReservedSpace(expectedFile2Reserved); + + // append one byte and verify reservedspace before and after abort + os = fs.append(file); + os.write(new byte[1]); + os.hflush(); + expectedFile1Reserved--; + checkReservedSpace(expectedFile2Reserved + expectedFile1Reserved); + DFSTestUtil.abortStream(((DFSOutputStream) os.getWrappedStream())); + checkReservedSpace(expectedFile2Reserved); + } + + private void checkReservedSpace(final long expectedReserved) + throws TimeoutException, InterruptedException, IOException { + for (final DataNode dn : cluster.getDataNodes()) { + for (FsVolumeSpi fsVolume : dn.getFSDataset().getVolumes()) { + final FsVolumeImpl volume = (FsVolumeImpl) fsVolume; + GenericTestUtils.waitFor(new Supplier<Boolean>() { + @Override + public Boolean get() { + LOG.info( + "dn " + dn.getDisplayName() + " space : " + volume + .getReservedForRbw() + ", Expected ReservedSpace :" + + expectedReserved); + return (volume.getReservedForRbw() == expectedReserved); + } + }, 100, 3000); + } + } + } }