Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6581 e4d29fda7 -> 222bf0fe6


HDFS-6948. DN rejects blocks if it has older UC block. Contributed by
Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f02d934f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f02d934f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f02d934f

Branch: refs/heads/HDFS-6581
Commit: f02d934fedf00f0ce43d6f3f9b06d89ccc6851a5
Parents: 6fe5c6b
Author: Kihwal Lee <kih...@apache.org>
Authored: Fri Sep 19 08:50:43 2014 -0500
Committer: Kihwal Lee <kih...@apache.org>
Committed: Fri Sep 19 08:50:43 2014 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 14 ++++++++---
 .../fsdataset/impl/TestWriteToReplica.java      | 25 +++++++++++++++++++-
 3 files changed, 38 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 436d2f0..9d76c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+    HDFS-6948. DN rejects blocks if it has older UC block
+    (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d28d616..a2179dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1090,9 +1090,17 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+                      .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
     }
     
     FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
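
The hunk above changes createTemporary() so that an incoming block whose
generation stamp is newer than that of an existing under-construction (UC)
replica evicts the stale replica (stopping its writer and invalidating it)
instead of being rejected outright. Below is a minimal, self-contained sketch
of that decision logic; the Replica class, volumeMap, and exception type are
simplified stand-ins for illustration, not the real FsDatasetImpl types:

    import java.util.HashMap;
    import java.util.Map;

    class CreateTemporarySketch {
      // Simplified stand-in for a replica record; not Hadoop's ReplicaInfo.
      static class Replica {
        final long blockId;
        final long genStamp;
        final boolean inPipeline; // still being written, i.e. under construction
        Replica(long blockId, long genStamp, boolean inPipeline) {
          this.blockId = blockId;
          this.genStamp = genStamp;
          this.inPipeline = inPipeline;
        }
      }

      static final Map<Long, Replica> volumeMap = new HashMap<>();

      // Mirrors the patched decision: a newer generation stamp evicts an
      // older under-construction replica; anything else is a duplicate.
      static Replica createTemporary(long blockId, long genStamp) {
        Replica existing = volumeMap.get(blockId);
        if (existing != null) {
          if (existing.genStamp < genStamp && existing.inPipeline) {
            // Corresponds to stopWriter() plus invalidate() in the real patch.
            volumeMap.remove(blockId);
          } else {
            throw new IllegalStateException("Block " + blockId
                + " already exists and thus cannot be created.");
          }
        }
        Replica created = new Replica(blockId, genStamp, true);
        volumeMap.put(blockId, created);
        return created;
      }

      public static void main(String[] args) {
        createTemporary(1L, 1000L);   // first write succeeds
        createTemporary(1L, 1001L);   // newer genstamp replaces the UC replica
        try {
          createTemporary(1L, 1001L); // equal genstamp is still rejected
        } catch (IllegalStateException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }

Note that replacement happens only while the existing replica is still in the
write pipeline; a finalized replica is rejected regardless of generation stamp.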

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02d934f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e6a03d2..a870aa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -111,7 +111,7 @@ public class TestWriteToReplica {
   
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
     }
     
     dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+    try {
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.fail("Should not have created a replica that had already been "
+          + "created " + blocks[NON_EXISTENT]);
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
+      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+    }
+
+    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
+    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
+    try {
+      ReplicaInPipeline replicaInfo =
+                dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assert.assertTrue(
+          replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
+    } catch (ReplicaAlreadyExistsException e) {
+      Assert.fail("createTemporary() should have removed the block with the older "
+          + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
+    }
   }
 }
