Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6df8fa1bc -> a5ec44c65
  refs/heads/branch-2.9 4795efc83 -> 905c37734
  refs/heads/branch-3.0 ccd7d50fa -> cdc16b3a2
  refs/heads/branch-3.1 f67865ed7 -> df54cdbae
  refs/heads/trunk a97a2042f -> 53b807a6a


HDFS-13560. Insufficient system resources exist to complete the requested service for some tests on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53b807a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53b807a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53b807a6

Branch: refs/heads/trunk
Commit: 53b807a6a8486cefe0b036f7893de9f619bd44a1
Parents: a97a204
Author: Inigo Goiri <inigo...@apache.org>
Authored: Thu May 17 17:03:23 2018 -0700
Committer: Inigo Goiri <inigo...@apache.org>
Committed: Thu May 17 17:03:23 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/util/Shell.java  | 15 +++++++++++++++
 .../hdfs/server/datanode/TestDirectoryScanner.java   |  3 ++-
 .../datanode/fsdataset/impl/LazyPersistTestCase.java |  3 ++-
 .../hdfs/server/namenode/TestNameNodeMXBean.java     |  7 +++++--
 4 files changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index c25cba2..04b4b4f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -1388,4 +1388,19 @@ public abstract class Shell {
       return new HashSet<>(CHILD_SHELLS.keySet());
     }
   }
+
+  /**
+   * Static method to return the memory lock limit for datanode.
+   * @param ulimit max value at which memory locked should be capped.
+   * @return long value specifying the memory lock limit.
+   */
+  public static Long getMemlockLimit(Long ulimit) {
+    if (WINDOWS) {
+      // HDFS-13560: if ulimit is too large on Windows, Windows will complain
+      // "1450: Insufficient system resources exist to complete the requested
+      // service". Thus, cap Windows memory lock limit at Integer.MAX_VALUE.
+      return Math.min(Integer.MAX_VALUE, ulimit);
+    }
+    return ulimit;
+  }
 }
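
For context, a minimal usage sketch (not part of this commit) of what the new helper returns; the variable names below are illustrative only:

    // Illustrative sketch, not from the commit: getMemlockLimit() passes the
    // requested limit through on non-Windows platforms and caps it at
    // Integer.MAX_VALUE on Windows, avoiding the "1450" error from HDFS-13560.
    long requested = Long.MAX_VALUE;
    long effective = org.apache.hadoop.util.Shell.getMemlockLimit(requested);
    // On Windows:           effective == Integer.MAX_VALUE
    // On Linux/macOS/other: effective == requested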

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index c95c71b..f792523 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
@@ -99,7 +100,7 @@ public class TestDirectoryScanner {
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-                 Long.MAX_VALUE);
+                 getMemlockLimit(Long.MAX_VALUE));
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index c412dad..aae59dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -25,6 +25,7 @@ import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -421,7 +422,7 @@ public abstract class LazyPersistTestCase {
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
-    private long maxLockedMemory = Long.MAX_VALUE;
+    private long maxLockedMemory = getMemlockLimit(Long.MAX_VALUE);
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 9c165d8..3728420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -75,6 +75,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -103,8 +104,10 @@ public class TestNameNodeMXBean {
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
+    Long maxLockedMemory = getMemlockLimit(
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+        maxLockedMemory);
     MiniDFSCluster cluster = null;
 
     try {
@@ -256,7 +259,7 @@ public class TestNameNodeMXBean {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+      assertEquals(maxLockedMemory *
           cluster.getDataNodes().size(),
               mbs.getAttribute(mxbeanName, "CacheCapacity"));
       assertNull("RollingUpgradeInfo should be null when there is no rolling"
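
A brief illustration (not part of this commit) of why the assertion now uses the capped value: the DataNodes are configured with the capped limit, so the aggregate CacheCapacity reported by the MXBean must be computed from the same value for the test to hold on Windows. Variable names below are illustrative only:

    // Illustrative only: CacheCapacity is the per-datanode locked-memory limit
    // times the number of datanodes, so both sides must use the capped value.
    long perDataNode = maxLockedMemory;  // already capped via getMemlockLimit(...)
    long expectedCacheCapacity = perDataNode * cluster.getDataNodes().size();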

