Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1023196ce -> 08abb320a


HDFS-6965. NN continues to issue block locations for DNs with full
disks. Contributed by Rushabh Shah.
(cherry picked from commit 0c26412be4b3ec40130b7200506c957f0402ecbc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08abb320
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08abb320
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08abb320

Branch: refs/heads/branch-2
Commit: 08abb320a77acbf16ffd3b028eca2b2108ddece4
Parents: 1023196
Author: Kihwal Lee <kih...@apache.org>
Authored: Tue Sep 16 09:06:43 2014 -0500
Committer: Kihwal Lee <kih...@apache.org>
Committed: Tue Sep 16 09:06:43 2014 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../BlockPlacementPolicyDefault.java            |  2 +-
 .../blockmanagement/TestBlockManager.java       | 55 ++++++++++++++++++++
 3 files changed, 59 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08abb320/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 080f3b1..d945a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -395,6 +395,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
     (clamb via wang)
 
+    HDFS-6965. NN continues to issue block locations for DNs with full disks.
+    (Rushabh Shah via kihwal)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
       HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08abb320/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index e2026c1..f77d4ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -635,7 +635,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     
     final long requiredSize = blockSize * HdfsConstants.MIN_BLOCKS_FOR_WRITE;
     final long scheduledSize = blockSize * node.getBlocksScheduled();
-    if (requiredSize > node.getRemaining() - scheduledSize) {
+    if (requiredSize > storage.getRemaining() - scheduledSize) {
       logNodeIsNotChosen(storage, "the node does not have enough space ");
       return false;
     }
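
For readers skimming the one-line fix: node.getRemaining() is free space
aggregated across all of a datanode's storages, while storage.getRemaining()
is the free space of the one storage (disk) being considered as a target.
Under the old check, a datanode with a full disk could keep being handed
blocks for that disk as long as its other disks kept the aggregate positive.
Below is a minimal standalone sketch of the two behaviors; Storage and Node
are simplified stand-ins for DatanodeStorageInfo and DatanodeDescriptor, not
the real HDFS classes, and treating MIN_BLOCKS_FOR_WRITE as 1 is an
assumption made for illustration.

public class StorageCapacityCheckSketch {

  static final int MIN_BLOCKS_FOR_WRITE = 1; // assumed value, for illustration

  // Stand-in for DatanodeStorageInfo: one disk on a datanode.
  static class Storage {
    final long remaining; // free bytes on this particular disk
    Storage(long remaining) { this.remaining = remaining; }
  }

  // Stand-in for DatanodeDescriptor: a datanode with several disks.
  static class Node {
    final Storage[] storages;
    final int blocksScheduled; // blocks already scheduled to this node
    Node(int blocksScheduled, Storage... storages) {
      this.blocksScheduled = blocksScheduled;
      this.storages = storages;
    }
    long remaining() { // aggregate free space across all disks
      long sum = 0;
      for (Storage s : storages) {
        sum += s.remaining;
      }
      return sum;
    }
  }

  // Old behavior: compare against the node-wide aggregate.
  static boolean hasEnoughSpaceOld(Node node, Storage storage, long blockSize) {
    final long requiredSize = blockSize * MIN_BLOCKS_FOR_WRITE;
    final long scheduledSize = blockSize * node.blocksScheduled;
    return requiredSize <= node.remaining() - scheduledSize;
  }

  // New behavior (this commit): compare against the candidate storage itself.
  static boolean hasEnoughSpaceNew(Node node, Storage storage, long blockSize) {
    final long requiredSize = blockSize * MIN_BLOCKS_FOR_WRITE;
    final long scheduledSize = blockSize * node.blocksScheduled;
    return requiredSize <= storage.remaining - scheduledSize;
  }

  public static void main(String[] args) {
    final long blockSize = 128L * 1024 * 1024; // 128 MB
    Storage fullDisk = new Storage(0);               // no room left here
    Storage emptyDisk = new Storage(10 * blockSize); // plenty of room
    Node node = new Node(0, fullDisk, emptyDisk);
    // The old check accepts the full disk because the node total looks healthy.
    System.out.println(hasEnoughSpaceOld(node, fullDisk, blockSize)); // true
    // The new check correctly rejects the full disk as a target.
    System.out.println(hasEnoughSpaceNew(node, fullDisk, blockSize)); // false
  }
}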

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08abb320/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 41af237..1a8262f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -35,17 +35,24 @@ import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -599,5 +606,53 @@ public class TestBlockManager {
         new BlockListAsLongs(null, null));
     assertEquals(1, ds.getBlockReportCount());
   }
+
+  /**
+   * Tests that the namenode does not choose a datanode with full disks
+   * to store blocks.
+   * @throws Exception
+   */
+  @Test
+  public void testStorageWithRemainingCapacity() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    FileSystem fs = FileSystem.get(conf);
+    Path file1 = null;
+    try {
+      cluster.waitActive();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final String poolId = namesystem.getBlockPoolId();
+      final DatanodeRegistration nodeReg =
+          DataNodeTestUtils.getDNRegistrationForBP(
+              cluster.getDataNodes().get(0), poolId);
+      final DatanodeDescriptor dd =
+          NameNodeAdapter.getDatanode(namesystem, nodeReg);
+      // By default, MiniDFSCluster creates one datanode with two storages.
+      // Give each storage 64k of remaining capacity, then try to create a
+      // 100k file; no single storage can hold the file's block.
+      for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
+        storage.setUtilizationForTesting(65536, 0, 65536, 0);
+      }
+      // The node-level remaining space is the sum over both storages (128k).
+      dd.setRemaining(131072);
+      file1 = new Path("testRemainingStorage.dat");
+      try {
+        DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400,
+            (short) 1, 0x1BAD5EED);
+      } catch (RemoteException re) {
+        // Expected: neither storage has room for a 100k block, even though
+        // the node as a whole reports 128k of remaining space.
+        GenericTestUtils.assertExceptionContains("nodes instead of "
+            + "minReplication", re);
+      }
+    } finally {
+      // The file entry was created in the namespace even though block
+      // allocation failed, so it should still exist; clean it up.
+      assertTrue(fs.exists(file1));
+      fs.delete(file1, true);
+      assertTrue(!fs.exists(file1));
+      cluster.shutdown();
+    }
+  }
 }
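
A quick sanity check on the numbers in this test: each of the two storages
is left with 65,536 bytes (64k) of remaining capacity, so the node-level
total set via dd.setRemaining(131072) is 128k. The file is written with a
102,400-byte (100k) block size at replication 1. The old node-level check
would have scheduled the write (100k fits in 128k); the per-storage check
rejects both storages (100k > 64k), so allocation fails with the "nodes
instead of minReplication" RemoteException that the test asserts.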
 
