[hadoop] branch trunk updated: HDFS-14648. Implement DeadNodeDetector basic model. Contributed by Lisheng Sun.

2019-11-15 Thread yqlin
This is an automated email from the ASF dual-hosted git repository.

yqlin pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b3119b9  HDFS-14648. Implement DeadNodeDetector basic model. 
Contributed by Lisheng Sun.
b3119b9 is described below

commit b3119b9ab60a19d624db476c4e1c53410870c7a6
Author: Yiqun Lin 
AuthorDate: Sat Nov 16 11:32:41 2019 +0800

HDFS-14648. Implement DeadNodeDetector basic model. Contributed by Lisheng 
Sun.
---
 .../java/org/apache/hadoop/hdfs/ClientContext.java |  49 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java |  98 +++
 .../org/apache/hadoop/hdfs/DFSInputStream.java |  98 +++
 .../apache/hadoop/hdfs/DFSStripedInputStream.java  |   6 +-
 .../org/apache/hadoop/hdfs/DeadNodeDetector.java   | 185 +
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |   4 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java |  31 +++-
 .../src/main/resources/hdfs-default.xml|   9 +
 .../apache/hadoop/hdfs/TestDeadNodeDetection.java  | 183 
 9 files changed, 617 insertions(+), 46 deletions(-)
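
The detector is gated by a new client-side switch (see the HdfsClientConfigKeys
and hdfs-default.xml entries above). A minimal usage sketch, assuming the
configuration key dfs.client.deadnode.detection.enabled added by this patch:

    // Sketch only: turn on shared dead node detection for a DFS client.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DeadNodeDetectionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.deadnode.detection.enabled", true);
        // Every DFSInputStream opened through this FileSystem now shares one
        // DeadNodeDetector via the common ClientContext.
        try (FileSystem fs =
            FileSystem.get(URI.create("hdfs://namenode:8020/"), conf)) {
          System.out.println(fs.getUri());
        }
      }
    }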

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index ad1b359..abb039c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.ScriptBasedMapping;
+import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -118,6 +119,19 @@ public class ClientContext {
   private NodeBase clientNode;
   private boolean topologyResolutionEnabled;
 
+  private Daemon deadNodeDetectorThr = null;
+
+  /**
+   * The switch for dead node detection, set from the client configuration.
+   */
+  private boolean deadNodeDetectionEnabled = false;
+
+  /**
+   * Detect the dead datanodes in advance, and share this information among all
+   * the DFSInputStreams in the same client.
+   */
+  private DeadNodeDetector deadNodeDetector = null;
+
   private ClientContext(String name, DfsClientConf conf,
   Configuration config) {
 final ShortCircuitConf scConf = conf.getShortCircuitConf();
@@ -134,6 +148,12 @@ public class ClientContext {
 
 this.byteArrayManager = ByteArrayManager.newInstance(
 conf.getWriteByteArrayManagerConf());
+    this.deadNodeDetectionEnabled = conf.isDeadNodeDetectionEnabled();
+    if (deadNodeDetectionEnabled && deadNodeDetector == null) {
+      deadNodeDetector = new DeadNodeDetector(name);
+      deadNodeDetectorThr = new Daemon(deadNodeDetector);
+      deadNodeDetectorThr.start();
+    }
 initTopologyResolution(config);
   }
 
@@ -251,4 +271,33 @@ public class ClientContext {
 datanodeInfo.getNetworkLocation());
 return NetworkTopology.getDistanceByPath(clientNode, node);
   }
+
+  /**
+   * Whether dead node detection is enabled. If true, a DeadNodeDetector is
+   * running and shared by all DFSInputStreams of this client.
+   */
+  public boolean isDeadNodeDetectionEnabled() {
+    return deadNodeDetectionEnabled;
+  }
+
+  /**
+   * Obtain the DeadNodeDetector of the current client.
+   */
+  public DeadNodeDetector getDeadNodeDetector() {
+    return deadNodeDetector;
+  }
+
+  /**
+   * Stop the dead node detector thread.
+   */
+  public void stopDeadNodeDetectorThread() {
+    if (deadNodeDetectorThr != null) {
+      deadNodeDetectorThr.interrupt();
+      try {
+        deadNodeDetectorThr.join(3000);
+      } catch (InterruptedException e) {
+        LOG.warn("Encountered exception while waiting to join on dead " +
+            "node detector thread.", e);
+      }
+    }
+  }
 }
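
The stop method above uses the standard interrupt-then-bounded-join idiom, so
closing the client cannot hang on a stuck detector. The same idiom in
isolation (a generic sketch, not code from this patch):

    public static void stopWorker(Thread worker) {
      worker.interrupt();          // signal the worker loop to exit
      try {
        worker.join(3000);         // bound the wait to three seconds
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // restore the interrupt status
      }
    }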
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 56280f3..c19aa96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -44,6 +44,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -631,6 +633,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   // lease renewal stops when all files are closed
   

[hadoop] branch branch-2.10 updated: Revert "YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to handle absolute resources while computing normalizedGuarantee. (Sunil G via wangda)"

2019-11-15 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 9087eb9  Revert "YARN-7411. Inter-Queue preemption's 
computeFixpointAllocation need to handle absolute resources while computing 
normalizedGuarantee. (Sunil G via wangda)"
9087eb9 is described below

commit 9087eb9423cefd2fb5b924a9e4db90f9e9e1b5a6
Author: Eric E Payne 
AuthorDate: Fri Nov 15 22:11:11 2019 +

Revert "YARN-7411. Inter-Queue preemption's computeFixpointAllocation need 
to handle absolute resources while computing normalizedGuarantee. (Sunil G via 
wangda)"

Pulling this back into branch-2 prevented nodemanagers from coming up 
if extended resources are present.

This reverts commit 5e14cddab5164cc2c027323e7dd2ba47aa0b738a.
---
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   | 12 
 .../util/resource/DefaultResourceCalculator.java   |  8 ---
 .../util/resource/DominantResourceCalculator.java  | 21 --
 .../yarn/util/resource/ResourceCalculator.java | 14 +---
 .../hadoop/yarn/util/resource/Resources.java   |  5 --
 .../AbstractPreemptableResourceCalculator.java | 24 +--
 .../monitor/capacity/TempQueuePerPartition.java| 12 ++--
 ...ionalCapacityPreemptionPolicyMockFramework.java | 14 
 ...lCapacityPreemptionPolicyForNodePartitions.java | 76 --
 9 files changed, 20 insertions(+), 166 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 4f90133..401e0c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -26,6 +26,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
@@ -151,6 +152,17 @@ public class ResourcePBImpl extends Resource {
 .newInstance(ResourceInformation.VCORES);
 this.setMemorySize(p.getMemory());
 this.setVirtualCores(p.getVirtualCores());
+
+// Update missing resource information on respective index.
+updateResourceInformationMap(types);
+  }
+
+  private void updateResourceInformationMap(ResourceInformation[] types) {
+    for (int i = 0; i < types.length; i++) {
+      if (resources[i] == null) {
+        resources[i] = ResourceInformation.newInstance(types[i]);
+      }
+    }
   }
 
   private static ResourceInformation newDefaultInformation(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 58db217..bdf60bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -112,14 +112,6 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public Resource multiplyAndNormalizeUp(Resource r, double[] by,
-      Resource stepFactor) {
-    return Resources.createResource(
-        roundUp((long) (r.getMemorySize() * by[0] + 0.5),
-            stepFactor.getMemorySize()));
-  }
-
-  @Override
   public Resource multiplyAndNormalizeDown(Resource r, double by,
   Resource stepFactor) {
 return Resources.createResource(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index edd3415..d64f03e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 

[hadoop] branch branch-2 updated: Revert "YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to handle absolute resources while computing normalizedGuarantee. (Sunil G via wangda)"

2019-11-15 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new a74411e  Revert "YARN-7411. Inter-Queue preemption's 
computeFixpointAllocation need to handle absolute resources while computing 
normalizedGuarantee. (Sunil G via wangda)"
a74411e is described below

commit a74411ebaa7fbfa7bc897aaf708baf04eb257a99
Author: Eric E Payne 
AuthorDate: Fri Nov 15 22:01:28 2019 +

Revert "YARN-7411. Inter-Queue preemption's computeFixpointAllocation need 
to handle absolute resources while computing normalizedGuarantee. (Sunil G via 
wangda)"

Pulling this back into branch-2 prevented nodemanagers from coming up if 
extended resources are present.

This reverts commit ab83765a6c587f8378daed741f7d05598d076188.
---
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   | 12 
 .../util/resource/DefaultResourceCalculator.java   |  8 ---
 .../util/resource/DominantResourceCalculator.java  | 21 --
 .../yarn/util/resource/ResourceCalculator.java | 14 +---
 .../hadoop/yarn/util/resource/Resources.java   |  5 --
 .../AbstractPreemptableResourceCalculator.java | 24 +--
 .../monitor/capacity/TempQueuePerPartition.java| 12 ++--
 ...ionalCapacityPreemptionPolicyMockFramework.java | 14 
 ...lCapacityPreemptionPolicyForNodePartitions.java | 76 --
 9 files changed, 20 insertions(+), 166 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 4f90133..401e0c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -26,6 +26,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
@@ -151,6 +152,17 @@ public class ResourcePBImpl extends Resource {
 .newInstance(ResourceInformation.VCORES);
 this.setMemorySize(p.getMemory());
 this.setVirtualCores(p.getVirtualCores());
+
+// Update missing resource information on respective index.
+updateResourceInformationMap(types);
+  }
+
+  private void updateResourceInformationMap(ResourceInformation[] types) {
+    for (int i = 0; i < types.length; i++) {
+      if (resources[i] == null) {
+        resources[i] = ResourceInformation.newInstance(types[i]);
+      }
+    }
   }
 
   private static ResourceInformation newDefaultInformation(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 58db217..bdf60bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -112,14 +112,6 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public Resource multiplyAndNormalizeUp(Resource r, double[] by,
-      Resource stepFactor) {
-    return Resources.createResource(
-        roundUp((long) (r.getMemorySize() * by[0] + 0.5),
-            stepFactor.getMemorySize()));
-  }
-
-  @Override
   public Resource multiplyAndNormalizeDown(Resource r, double by,
   Resource stepFactor) {
 return Resources.createResource(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index edd3415..d64f03e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 

[hadoop] branch branch-3.2 updated: HADOOP-16705. MBeanInfoBuilder puts unnecessary memory pressure on the system with a debug log.

2019-11-15 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new b914f38  HADOOP-16705. MBeanInfoBuilder puts unnecessary memory 
pressure on the system with a debug log.
b914f38 is described below

commit b914f385a0f721c9bac662865265874d74d7ef71
Author: Lukas Majercak 
AuthorDate: Thu Nov 14 09:54:02 2019 -0800

HADOOP-16705. MBeanInfoBuilder puts unnecessary memory pressure on the 
system with a debug log.

MBeanInfoBuilder's get() method DEBUG logs all the MBeanAttributeInfo 
attributes that it gathered. This can cause high memory churn that is easily avoided.

(cherry picked from commit c73334a924d2009d136a8ee62278aaac53418a24)
---
 .../src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
index 5282119..cdd0ba4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
@@ -106,7 +106,7 @@ class MBeanInfoBuilder implements MetricsVisitor {
   }
   ++curRecNo;
 }
-MetricsSystemImpl.LOG.debug(attrs.toString());
+MetricsSystemImpl.LOG.debug("{}", attrs);
 MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
 return new MBeanInfo(name, description, attrs.toArray(attrsArray),
  null, null, null); // no ops/ctors/notifications
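
The fix works because slf4j's parameterized form defers formatting until the
level check passes, while the old form paid for toString() on every call. A
stand-alone sketch of the difference (class and method names are illustrative):

    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LogChurnDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogChurnDemo.class);

      void report(List<Object> attrs) {
        LOG.debug(attrs.toString()); // eager: builds the string even if DEBUG is off
        LOG.debug("{}", attrs);      // lazy: formats only when DEBUG is enabled
      }
    }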





[hadoop] branch branch-3.1 updated: HADOOP-16705. MBeanInfoBuilder puts unnecessary memory pressure on the system with a debug log.

2019-11-15 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 50d98a6  HADOOP-16705. MBeanInfoBuilder puts unnecessary memory 
pressure on the system with a debug log.
50d98a6 is described below

commit 50d98a6568c6662d2757cf73cb9e066b4f808c75
Author: Lukas Majercak 
AuthorDate: Thu Nov 14 09:54:02 2019 -0800

HADOOP-16705. MBeanInfoBuilder puts unnecessary memory pressure on the 
system with a debug log.

MBeanInfoBuilder's get() method DEBUG logs all the MBeanAttributeInfo 
attributes that it gathered. This can cause high memory churn that is easily avoided.

(cherry picked from commit c73334a924d2009d136a8ee62278aaac53418a24)
---
 .../src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
index 5282119..cdd0ba4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
@@ -106,7 +106,7 @@ class MBeanInfoBuilder implements MetricsVisitor {
   }
   ++curRecNo;
 }
-MetricsSystemImpl.LOG.debug(attrs.toString());
+MetricsSystemImpl.LOG.debug("{}", attrs);
 MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
 return new MBeanInfo(name, description, attrs.toArray(attrsArray),
  null, null, null); // no ops/ctors/notifications





[hadoop] branch branch-3.0 updated: HADOOP-16705. MBeanInfoBuilder puts unnecessary memory pressure on the system with a debug log.

2019-11-15 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new 049d061  HADOOP-16705. MBeanInfoBuilder puts unnecessary memory 
pressure on the system with a debug log.
049d061 is described below

commit 049d061001957477494ff86bb03fbe34812502ef
Author: Lukas Majercak 
AuthorDate: Thu Nov 14 09:54:02 2019 -0800

HADOOP-16705. MBeanInfoBuilder puts unnecessary memory pressure on the 
system with a debug log.

MBeanInfoBuilder's get() method DEBUG logs all the MBeanAttributeInfo 
attributes that it gathered. This can cause high memory churn that is easily avoided.

(cherry picked from commit c73334a924d2009d136a8ee62278aaac53418a24)
---
 .../src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
index 5282119..cdd0ba4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
@@ -106,7 +106,7 @@ class MBeanInfoBuilder implements MetricsVisitor {
   }
   ++curRecNo;
 }
-MetricsSystemImpl.LOG.debug(attrs.toString());
+MetricsSystemImpl.LOG.debug("{}", attrs);
 MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
 return new MBeanInfo(name, description, attrs.toArray(attrsArray),
  null, null, null); // no ops/ctors/notifications





[hadoop] branch trunk updated (c892a87 -> 67f2c49)

2019-11-15 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from c892a87  HDFS-14882. Consider DataNode load when #getBlockLocation. 
Contributed by Xiaoqiao He.
 add 67f2c49  HDFS-14802. The feature of protect directories should be used 
in RenameOp (#1669)

No new revisions were added by this update.

Summary of changes:
 .../src/main/resources/core-default.xml|  2 +-
 .../src/site/markdown/filesystem/filesystem.md | 15 +++---
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  | 51 
 .../hadoop/hdfs/server/namenode/FSDirDeleteOp.java | 50 +---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  5 ++
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  4 +-
 .../server/namenode/TestProtectedDirectories.java  | 54 +-
 7 files changed, 123 insertions(+), 58 deletions(-)





[hadoop] branch trunk updated: HDFS-14882. Consider DataNode load when #getBlockLocation. Contributed by Xiaoqiao He.

2019-11-15 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c892a87  HDFS-14882. Consider DataNode load when #getBlockLocation. 
Contributed by Xiaoqiao He.
c892a87 is described below

commit c892a879ddce3abfd51c8609c81148bf6e4f9daa
Author: He Xiaoqiao 
AuthorDate: Fri Nov 15 12:15:33 2019 -0800

HDFS-14882. Consider DataNode load when #getBlockLocation. Contributed by 
Xiaoqiao He.

Signed-off-by: Wei-Chiu Chuang 

Reviewed-by: Inigo Goiri 
Reviewed-by: Istvan Fajth 
---
 .../org/apache/hadoop/net/NetworkTopology.java | 68 ++
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++
 .../server/blockmanagement/DatanodeManager.java| 24 ++-
 .../src/main/resources/hdfs-default.xml| 13 +++-
 .../blockmanagement/TestDatanodeManager.java   | 82 ++
 .../org/apache/hadoop/net/TestNetworkTopology.java | 34 ++---
 6 files changed, 199 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 724cec3..66799f5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -31,6 +31,7 @@ import org.slf4j.LoggerFactory;
 import java.util.*;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
 
 /** The class represents a cluster of computers with a tree hierarchical
  * network topology.
@@ -874,11 +875,33 @@ public class NetworkTopology {
  * This method is called if the reader is a datanode,
  * so nonDataNodeReader flag is set to false.
  */
-    sortByDistance(reader, nodes, activeLen, false);
+    sortByDistance(reader, nodes, activeLen,
+        list -> Collections.shuffle(list));
   }
 
   /**
-   * Sort nodes array by network distance to reader.
+   * Sort nodes array by network distance to reader with secondary sort.
+   * <p>
+   * In a three-level topology, a node can be either local, on the same rack,
+   * or on a different rack from the reader. Sorting the nodes based on network
+   * distance from the reader reduces network traffic and improves
+   * performance.
+   * <p>
+   * As an additional twist, we also randomize the nodes at each network
+   * distance. This helps with load balancing when there is data skew.
+   *
+   * @param reader Node where data will be read
+   * @param nodes Available replicas with the requested data
+   * @param activeLen Number of active nodes at the front of the array
+   * @param secondarySort a secondary sorting strategy that callers can inject
+   * to order nodes that are at the same network distance.
+   */
+  public <T extends Node> void sortByDistance(Node reader, T[] nodes,
+      int activeLen, Consumer<List<T>> secondarySort) {
+    sortByDistance(reader, nodes, activeLen, secondarySort, false);
+  }
+
+  /**
+   * Sort nodes array by network distance to reader with secondary sort.
*  using network location. This is used when the reader
* is not a datanode. Sorting the nodes based on network distance
* from the reader reduces network traffic and improves
@@ -895,7 +918,27 @@ public class NetworkTopology {
  * This method is called if the reader is not a datanode,
  * so nonDataNodeReader flag is set to true.
  */
-    sortByDistance(reader, nodes, activeLen, true);
+    sortByDistanceUsingNetworkLocation(reader, nodes, activeLen,
+        list -> Collections.shuffle(list));
+  }
+
+  /**
+   * Sort nodes array by network distance to reader using network location.
+   * This is used when the reader is not a datanode. Sorting the nodes based
+   * on network distance from the reader reduces network traffic and improves
+   * performance.
+   *
+   * @param reader Node where data will be read
+   * @param nodes Available replicas with the requested data
+   * @param activeLen Number of active nodes at the front of the array
+   * @param secondarySort a secondary sorting strategy that callers can inject
+   * to order nodes that are at the same network distance.
+   */
+  public <T extends Node> void sortByDistanceUsingNetworkLocation(Node reader,
+      T[] nodes, int activeLen, Consumer<List<T>> secondarySort) {
+    sortByDistance(reader, nodes, activeLen, secondarySort, true);
+  }
 
   /**
@@ -909,7 +952,8 @@ public class NetworkTopology {
* @param activeLen Number of active nodes at the front of the array
* @param nonDataNodeReader True if the reader is not a datanode
*/
-  private void sortByDistance(Node reader, Node[] nodes, int 
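
A caller-side sketch of the new secondary-sort hook, based on the signatures
above (the clusterMap, reader, and nodes names are assumptions):

    import java.util.Collections;
    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;

    // Sketch: shuffle replicas that tie on network distance, which matches
    // the default behavior wired in above.
    static <T extends Node> void sortForReader(NetworkTopology clusterMap,
        Node reader, T[] nodes) {
      clusterMap.sortByDistance(reader, nodes, nodes.length,
          list -> Collections.shuffle(list));
    }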

[hadoop] branch branch-3.1 updated: HDFS-14973. More strictly enforce Balancer/Mover/SPS throttling of getBlocks RPCs to NameNodes. Contributed by Erik Krogen.

2019-11-15 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 17779ad  HDFS-14973. More strictly enforce Balancer/Mover/SPS 
throttling of getBlocks RPCs to NameNodes. Contributed by Erik Krogen.
17779ad is described below

commit 17779adb32569bd689a93802512003b6a6816bd4
Author: Erik Krogen 
AuthorDate: Fri Nov 8 08:57:14 2019 -0800

HDFS-14973. More strictly enforce Balancer/Mover/SPS throttling of 
getBlocks RPCs to NameNodes. Contributed by Erik Krogen.

(cherry picked from b2cc8b6b4a78f31cdd937dc4d1a2255f80c5881e)
(cherry picked from 60655bfe54e138957ef5bbf480a4541bd83152fd)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 +++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 37 +---
 .../hdfs/server/balancer/NameNodeConnector.java| 20 ++-
 .../src/main/resources/hdfs-default.xml| 11 +++-
 .../hadoop/hdfs/server/balancer/TestBalancer.java  | 66 ++
 .../hdfs/server/balancer/TestBalancerRPCDelay.java | 28 -
 6 files changed, 108 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ce0c6d3..5bae88c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -591,6 +591,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.datanode.metrics.logger.period.seconds";
   public static final int DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
   600;
+  /**
+   * The maximum number of getBlocks RPCs data movement utilities can make to
+   * a NameNode per second. Values <= 0 disable throttling. This affects
+   * anything that uses a NameNodeConnector, i.e., the Balancer, Mover,
+   * and StoragePolicySatisfier.
+   */
+  public static final String  DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY = 
"dfs.namenode.get-blocks.max-qps";
+  public static final int DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT = 20;
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = 
"dfs.balancer.movedWinWidth";
   public static final longDFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
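
For a sense of the semantics, a per-second cap of this shape is commonly built
on a token-bucket limiter. A generic Guava sketch, illustrative only and not
necessarily how NameNodeConnector implements the throttle:

    import com.google.common.util.concurrent.RateLimiter;

    class GetBlocksThrottle {
      // 20 permits/second mirrors DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT;
      // per the javadoc above, a value <= 0 would mean no throttling at all.
      private final RateLimiter limiter = RateLimiter.create(20);

      void beforeGetBlocks() {
        limiter.acquire(); // blocks until a permit is available
        // ... issue the getBlocks RPC to the NameNode ...
      }
    }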
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 060c013..0c60588 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -927,10 +927,8 @@ public class Dispatcher {
  * move tasks or it has received enough blocks from the namenode, or the
  * elapsed time of the iteration has exceeded the max time limit.
  *
- * @param delay - time to sleep before sending getBlocks. Intended to
- * disperse Balancer RPCs to NameNode for large clusters. See HDFS-11384.
  */
-private void dispatchBlocks(long delay) {
+private void dispatchBlocks() {
   this.blocksToReceive = 2 * getScheduledSize();
   long previousMoveTimestamp = Time.monotonicNow();
   while (getScheduledSize() > 0 && !isIterationOver()
@@ -955,25 +953,15 @@ public class Dispatcher {
 if (shouldFetchMoreBlocks()) {
   // fetch new blocks
   try {
-            if (delay > 0) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Sleeping " + delay + "  msec.");
-              }
-              Thread.sleep(delay);
-            }
 final long received = getBlockList();
 if (received == 0) {
   return;
 }
 blocksToReceive -= received;
 continue;
-  } catch (InterruptedException ignored) {
-// nothing to do
   } catch (IOException e) {
 LOG.warn("Exception while getting reportedBlock list", e);
 return;
-  } finally {
-delay = 0L;
   }
 } else {
   // jump out of while-loop after the configured timeout.
@@ -1165,12 +1153,6 @@ public class Dispatcher {
   }
 
   /**
-   * The best-effort limit on the number of RPCs per second
-   * the Balancer will send to the NameNode.
-   */
-  final static int BALANCER_NUM_RPC_PER_SEC = 20;
-
-  /**
* Dispatch block moves for each source. The thread selects blocks to move &
* sends request to proxy source to initiate block move. The process is flow
* controlled. Block selection is blocked if there are too many un-confirmed
@@ -1185,12 

[hadoop] annotated tag remove-ozone updated (9f0610f -> 5316d3c)

2019-11-15 Thread dineshc
This is an automated email from the ASF dual-hosted git repository.

dineshc pushed a change to annotated tag remove-ozone
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


*** WARNING: tag remove-ozone was modified! ***

from 9f0610f  (commit)
  to 5316d3c  (tag)
 tagging 9f0610fb83ae064e2e2c854fb2e9c9dc4cbc1646 (commit)
  by Dinesh Chitlangia
  on Fri Nov 15 14:54:48 2019 -0500

- Log -
Removing the Ozone code from the Hadoop Repo.
Ozone has its own code repo now, at github.com/apache/hadoop-ozone.
We are removing this to avoid confusion for new contributors.
Here is the reference to the mail thread discussion on this topic.
https://lists.apache.org/thread.html/fafe8831a30b33182d8754b38fe26c7a58d8d4b4fe10adefba759b5d@%3Ccommon-dev.hadoop.apache.org%3E
---


No new revisions were added by this update.

Summary of changes:





[hadoop] branch branch-2.10 updated: YARN-8179: Preemption does not happen due to natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.

2019-11-15 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new dc1c9b8  YARN-8179: Preemption does not happen due to 
natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.
dc1c9b8 is described below

commit dc1c9b8710b8f35e795ca23f517c8c1b57db7304
Author: Eric E Payne 
AuthorDate: Mon May 21 20:14:58 2018 +

YARN-8179: Preemption does not happen due to natural_termination_factor 
when DRF is used. Contributed by Kyungwan Nam.

(cherry picked from commit 0b4c44bdeef62945b592d5761666ad026b629c0b)
---
 .../capacity/PreemptableResourceCalculator.java|  7 ++-
 ...lCapacityPreemptionPolicyInterQueueWithDRF.java | 56 ++
 2 files changed, 61 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 907785e..a5576e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -198,8 +198,11 @@ public class PreemptableResourceCalculator
*/
   Resource resToObtain = qT.toBePreempted;
   if (!isReservedPreemptionCandidatesSelector) {
-        resToObtain = Resources.multiply(qT.toBePreempted,
-            context.getNaturalTerminationFactor());
+        if (Resources.greaterThan(rc, clusterResource, resToObtain,
+            Resource.newInstance(0, 0))) {
+          resToObtain = Resources.multiplyAndNormalizeUp(rc, qT.toBePreempted,
+              context.getNaturalTerminationFactor(), Resource.newInstance(1, 1));
+        }
   }
 
   // Only add resToObtain when it >= 0
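
Concretely, with hypothetical numbers: given a natural termination factor of
0.1 and qT.toBePreempted = <memory:100, vCores:5>, the old Resources.multiply
truncates the vcore dimension (5 * 0.1 = 0.5 -> 0), so under DRF no vcores are
ever marked for preemption; multiplyAndNormalizeUp with a step factor of
<memory:1, vCores:1> rounds each nonzero dimension up to at least one unit,
yielding <memory:10, vCores:1>, and preemption can make progress.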
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
index 0d6d350..c8a1f0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
@@ -18,15 +18,28 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.junit.Before;
 import org.junit.Test;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
 extends ProportionalCapacityPreemptionPolicyMockFramework {
+
+  @Before
+  public void setup() {
+    super.setup();
+    rc = new DominantResourceCalculator();
+    when(cs.getResourceCalculator()).thenReturn(rc);
+    policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
+  }
+
   @Test
   public void testInterQueuePreemptionWithMultipleResource()
   throws Exception {
@@ -65,4 +78,47 @@ public class 
TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
 new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
 getAppAttemptId(2;
   }
+
+  @Test
+  public void testInterQueuePreemptionWithNaturalTerminationFactor()
+  throws Exception {
+/**
+ * Queue structure is:
+ *
+ * <pre>
+ *       root
+ *      /    \
+ *     a      b
+ * </pre>
+ *
+ * Guaranteed resource of a/b are 50:50. Total cluster resource = 100.
+ * Scenario: All 

[hadoop] branch branch-2 updated: YARN-8179: Preemption does not happen due to natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.

2019-11-15 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 6973f78  YARN-8179: Preemption does not happen due to 
natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.
6973f78 is described below

commit 6973f78a731cb2d75ef3010927cf3f300e28932e
Author: Eric E Payne 
AuthorDate: Mon May 21 20:14:58 2018 +

YARN-8179: Preemption does not happen due to natural_termination_factor 
when DRF is used. Contributed by Kyungwan Nam.

(cherry picked from commit 0b4c44bdeef62945b592d5761666ad026b629c0b)
---
 .../capacity/PreemptableResourceCalculator.java|  7 ++-
 ...lCapacityPreemptionPolicyInterQueueWithDRF.java | 56 ++
 2 files changed, 61 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 907785e..a5576e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -198,8 +198,11 @@ public class PreemptableResourceCalculator
*/
   Resource resToObtain = qT.toBePreempted;
   if (!isReservedPreemptionCandidatesSelector) {
-        resToObtain = Resources.multiply(qT.toBePreempted,
-            context.getNaturalTerminationFactor());
+        if (Resources.greaterThan(rc, clusterResource, resToObtain,
+            Resource.newInstance(0, 0))) {
+          resToObtain = Resources.multiplyAndNormalizeUp(rc, qT.toBePreempted,
+              context.getNaturalTerminationFactor(), Resource.newInstance(1, 1));
+        }
   }
 
   // Only add resToObtain when it >= 0
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
index 0d6d350..c8a1f0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
@@ -18,15 +18,28 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.junit.Before;
 import org.junit.Test;
 
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
 extends ProportionalCapacityPreemptionPolicyMockFramework {
+
+  @Before
+  public void setup() {
+    super.setup();
+    rc = new DominantResourceCalculator();
+    when(cs.getResourceCalculator()).thenReturn(rc);
+    policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
+  }
+
   @Test
   public void testInterQueuePreemptionWithMultipleResource()
   throws Exception {
@@ -65,4 +78,47 @@ public class 
TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
 new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
 getAppAttemptId(2;
   }
+
+  @Test
+  public void testInterQueuePreemptionWithNaturalTerminationFactor()
+  throws Exception {
+/**
+ * Queue structure is:
+ *
+ * <pre>
+ *       root
+ *      /    \
+ *     a      b
+ * </pre>
+ *
+ * Guaranteed resource of a/b are 50:50. Total cluster resource = 100.
+ * Scenario: All resources 

[hadoop] branch branch-3.2 updated: HDFS-14973. More strictly enforce Balancer/Mover/SPS throttling of getBlocks RPCs to NameNodes. Contributed by Erik Krogen.

2019-11-15 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 60655bf  HDFS-14973. More strictly enforce Balancer/Mover/SPS 
throttling of getBlocks RPCs to NameNodes. Contributed by Erik Krogen.
60655bf is described below

commit 60655bfe54e138957ef5bbf480a4541bd83152fd
Author: Erik Krogen 
AuthorDate: Fri Nov 8 08:57:14 2019 -0800

HDFS-14973. More strictly enforce Balancer/Mover/SPS throttling of 
getBlocks RPCs to NameNodes. Contributed by Erik Krogen.

(cherry picked from b2cc8b6b4a78f31cdd937dc4d1a2255f80c5881e)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 +++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 37 +---
 .../hdfs/server/balancer/NameNodeConnector.java| 19 ++-
 .../src/main/resources/hdfs-default.xml| 11 +++-
 .../hadoop/hdfs/server/balancer/TestBalancer.java  | 66 ++
 .../hdfs/server/balancer/TestBalancerRPCDelay.java | 28 -
 6 files changed, 108 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f2df396..ed32485 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -604,6 +604,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.datanode.metrics.logger.period.seconds";
   public static final int DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
   600;
+  /**
+   * The maximum number of getBlocks RPCs data movement utilities can make to
+   * a NameNode per second. Values <= 0 disable throttling. This affects
+   * anything that uses a NameNodeConnector, i.e., the Balancer, Mover,
+   * and StoragePolicySatisfier.
+   */
+  public static final String  DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY = 
"dfs.namenode.get-blocks.max-qps";
+  public static final int DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT = 20;
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = 
"dfs.balancer.movedWinWidth";
   public static final longDFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 8a71417..03f2686 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -927,10 +927,8 @@ public class Dispatcher {
  * move tasks or it has received enough blocks from the namenode, or the
  * elapsed time of the iteration has exceeded the max time limit.
  *
- * @param delay - time to sleep before sending getBlocks. Intended to
- * disperse Balancer RPCs to NameNode for large clusters. See HDFS-11384.
  */
-private void dispatchBlocks(long delay) {
+private void dispatchBlocks() {
   this.blocksToReceive = 2 * getScheduledSize();
   long previousMoveTimestamp = Time.monotonicNow();
   while (getScheduledSize() > 0 && !isIterationOver()
@@ -955,25 +953,15 @@ public class Dispatcher {
 if (shouldFetchMoreBlocks()) {
   // fetch new blocks
   try {
-            if (delay > 0) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Sleeping " + delay + "  msec.");
-              }
-              Thread.sleep(delay);
-            }
 final long received = getBlockList();
 if (received == 0) {
   return;
 }
 blocksToReceive -= received;
 continue;
-  } catch (InterruptedException ignored) {
-// nothing to do
   } catch (IOException e) {
 LOG.warn("Exception while getting reportedBlock list", e);
 return;
-  } finally {
-delay = 0L;
   }
 } else {
   // jump out of while-loop after the configured timeout.
@@ -1165,12 +1153,6 @@ public class Dispatcher {
   }
 
   /**
-   * The best-effort limit on the number of RPCs per second
-   * the Balancer will send to the NameNode.
-   */
-  final static int BALANCER_NUM_RPC_PER_SEC = 20;
-
-  /**
* Dispatch block moves for each source. The thread selects blocks to move &
* sends request to proxy source to initiate block move. The process is flow
* controlled. Block selection is blocked if there are too many un-confirmed
@@ -1185,12 +1167,7 @@ public class Dispatcher {
 int concurrentThreads = 

[hadoop] branch trunk updated: HDFS-14973. More strictly enforce Balancer/Mover/SPS throttling of getBlocks RPCs to NameNodes. Contributed by Erik Krogen.

2019-11-15 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b2cc8b6  HDFS-14973. More strictly enforce Balancer/Mover/SPS 
throttling of getBlocks RPCs to NameNodes. Contributed by Erik Krogen.
b2cc8b6 is described below

commit b2cc8b6b4a78f31cdd937dc4d1a2255f80c5881e
Author: Erik Krogen 
AuthorDate: Fri Nov 8 08:57:14 2019 -0800

HDFS-14973. More strictly enforce Balancer/Mover/SPS throttling of 
getBlocks RPCs to NameNodes. Contributed by Erik Krogen.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 +++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 37 +---
 .../hdfs/server/balancer/NameNodeConnector.java| 19 ++-
 .../src/main/resources/hdfs-default.xml| 11 +++-
 .../hadoop/hdfs/server/balancer/TestBalancer.java  | 66 ++
 .../hdfs/server/balancer/TestBalancerRPCDelay.java | 28 -
 6 files changed, 108 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 16a29dd..a2df317 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -614,6 +614,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.datanode.metrics.logger.period.seconds";
   public static final int DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
   600;
+  /**
+   * The maximum number of getBlocks RPCs data movement utilities can make to
+   * a NameNode per second. Values <= 0 disable throttling. This affects
+   * anything that uses a NameNodeConnector, i.e., the Balancer, Mover,
+   * and StoragePolicySatisfier.
+   */
+  public static final String  DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY = 
"dfs.namenode.get-blocks.max-qps";
+  public static final int DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT = 20;
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = 
"dfs.balancer.movedWinWidth";
   public static final longDFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index cf3dc3b..c70 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -928,10 +928,8 @@ public class Dispatcher {
  * move tasks or it has received enough blocks from the namenode, or the
  * elapsed time of the iteration has exceeded the max time limit.
  *
- * @param delay - time to sleep before sending getBlocks. Intended to
- * disperse Balancer RPCs to NameNode for large clusters. See HDFS-11384.
  */
-private void dispatchBlocks(long delay) {
+private void dispatchBlocks() {
   this.blocksToReceive = 2 * getScheduledSize();
   long previousMoveTimestamp = Time.monotonicNow();
   while (getScheduledSize() > 0 && !isIterationOver()
@@ -956,25 +954,15 @@ public class Dispatcher {
 if (shouldFetchMoreBlocks()) {
   // fetch new blocks
   try {
-            if (delay > 0) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Sleeping " + delay + "  msec.");
-              }
-              Thread.sleep(delay);
-            }
 final long received = getBlockList();
 if (received == 0) {
   return;
 }
 blocksToReceive -= received;
 continue;
-  } catch (InterruptedException ignored) {
-// nothing to do
   } catch (IOException e) {
 LOG.warn("Exception while getting reportedBlock list", e);
 return;
-  } finally {
-delay = 0L;
   }
 } else {
   // jump out of while-loop after the configured timeout.
@@ -1167,12 +1155,6 @@ public class Dispatcher {
   }
 
   /**
-   * The best-effort limit on the number of RPCs per second
-   * the Balancer will send to the NameNode.
-   */
-  final static int BALANCER_NUM_RPC_PER_SEC = 20;
-
-  /**
* Dispatch block moves for each source. The thread selects blocks to move &
* sends request to proxy source to initiate block move. The process is flow
* controlled. Block selection is blocked if there are too many un-confirmed
@@ -1187,12 +1169,7 @@ public class Dispatcher {
 int concurrentThreads = Math.min(sources.size(),
 

[hadoop] branch branch-2.10 updated: YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to handle absolute resources while computing normalizedGuarantee. (Sunil G via wangda)

2019-11-15 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 5e14cdd  YARN-7411. Inter-Queue preemption's computeFixpointAllocation 
need to handle absolute resources while computing normalizedGuarantee. (Sunil G 
via wangda)
5e14cdd is described below

commit 5e14cddab5164cc2c027323e7dd2ba47aa0b738a
Author: Wangda Tan 
AuthorDate: Mon Nov 13 16:26:27 2017 -0800

YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to 
handle absolute resources while computing normalizedGuarantee. (Sunil G via 
wangda)

Change-Id: I41b1d7558c20fc4eb2050d40134175a2ef6330cb
(cherry picked from commit 034b312d9f19024d2eabd377210d17d4080ef70e)
---
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   | 12 
 .../util/resource/DefaultResourceCalculator.java   |  8 +++
 .../util/resource/DominantResourceCalculator.java  | 21 ++
 .../yarn/util/resource/ResourceCalculator.java | 14 +++-
 .../hadoop/yarn/util/resource/Resources.java   |  5 ++
 .../AbstractPreemptableResourceCalculator.java | 24 ++-
 .../monitor/capacity/TempQueuePerPartition.java| 12 ++--
 ...ionalCapacityPreemptionPolicyMockFramework.java | 14 
 ...lCapacityPreemptionPolicyForNodePartitions.java | 76 ++
 9 files changed, 166 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 401e0c0..4f90133 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -26,7 +26,6 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
@@ -152,17 +151,6 @@ public class ResourcePBImpl extends Resource {
 .newInstance(ResourceInformation.VCORES);
 this.setMemorySize(p.getMemory());
 this.setVirtualCores(p.getVirtualCores());
-
-// Update missing resource information on respective index.
-updateResourceInformationMap(types);
-  }
-
-  private void updateResourceInformationMap(ResourceInformation[] types) {
-    for (int i = 0; i < types.length; i++) {
-      if (resources[i] == null) {
-        resources[i] = ResourceInformation.newInstance(types[i]);
-      }
-    }
   }
 
   private static ResourceInformation newDefaultInformation(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd..58db217 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -112,6 +112,14 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
+  public Resource multiplyAndNormalizeUp(Resource r, double[] by,
+      Resource stepFactor) {
+    return Resources.createResource(
+        roundUp((long) (r.getMemorySize() * by[0] + 0.5),
+            stepFactor.getMemorySize()));
+  }
+
+  @Override
   public Resource multiplyAndNormalizeDown(Resource r, double by,
   Resource stepFactor) {
 return Resources.createResource(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index d64f03e..edd3415 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -496,6 +496,27 @@ public class DominantResourceCalculator
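
The new multiplyAndNormalizeUp(Resource, double[], Resource) overload added above scales each resource dimension by its own factor and rounds the result up to a multiple of the step size. A minimal standalone sketch of that arithmetic, with hypothetical names and only the memory dimension shown (not project code):

// Illustrative sketch: the per-dimension scale-then-round-up step that the
// new overload applies via roundUp(). All names here are made up.
public final class NormalizeUpSketch {

  // Round value up to the nearest multiple of step (assumes step > 0).
  static long roundUp(long value, long step) {
    long remainder = value % step;
    return remainder == 0 ? value : value + (step - remainder);
  }

  // Scale by the per-resource factor, round to long, normalize up.
  static long multiplyAndNormalizeUp(long memory, double by, long step) {
    return roundUp((long) (memory * by + 0.5), step);
  }

  public static void main(String[] args) {
    // 1000 MB * 0.33 = 330 MB, normalized up to the next 256 MB step.
    System.out.println(multiplyAndNormalizeUp(1000L, 0.33, 256L)); // 512
  }
}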

[hadoop] branch branch-2 updated: YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to handle absolute resources while computing normalizedGuarantee. (Sunil G via wangda)

2019-11-15 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new ab83765  YARN-7411. Inter-Queue preemption's computeFixpointAllocation 
need to handle absolute resources while computing normalizedGuarantee. (Sunil G 
via wangda)
ab83765 is described below

commit ab83765a6c587f8378daed741f7d05598d076188
Author: Wangda Tan 
AuthorDate: Mon Nov 13 16:26:27 2017 -0800

YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to 
handle absolute resources while computing normalizedGuarantee. (Sunil G via 
wangda)

Change-Id: I41b1d7558c20fc4eb2050d40134175a2ef6330cb
(cherry picked from commit 034b312d9f19024d2eabd377210d17d4080ef70e)
---
 .../yarn/api/records/impl/pb/ResourcePBImpl.java   | 12 
 .../util/resource/DefaultResourceCalculator.java   |  8 +++
 .../util/resource/DominantResourceCalculator.java  | 21 ++
 .../yarn/util/resource/ResourceCalculator.java | 14 +++-
 .../hadoop/yarn/util/resource/Resources.java   |  5 ++
 .../AbstractPreemptableResourceCalculator.java | 24 ++-
 .../monitor/capacity/TempQueuePerPartition.java| 12 ++--
 ...ionalCapacityPreemptionPolicyMockFramework.java | 14 
 ...lCapacityPreemptionPolicyForNodePartitions.java | 76 ++
 9 files changed, 166 insertions(+), 20 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 401e0c0..4f90133 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
@@ -152,17 +151,6 @@ public class ResourcePBImpl extends Resource {
         .newInstance(ResourceInformation.VCORES);
     this.setMemorySize(p.getMemory());
     this.setVirtualCores(p.getVirtualCores());
-
-    // Update missing resource information on respective index.
-    updateResourceInformationMap(types);
-  }
-
-  private void updateResourceInformationMap(ResourceInformation[] types) {
-    for (int i = 0; i < types.length; i++) {
-      if (resources[i] == null) {
-        resources[i] = ResourceInformation.newInstance(types[i]);
-      }
-    }
   }
 
   private static ResourceInformation newDefaultInformation(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd..58db217 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -112,6 +112,14 @@ public class DefaultResourceCalculator extends ResourceCalculator {
   }
 
   @Override
+  public Resource multiplyAndNormalizeUp(Resource r, double[] by,
+      Resource stepFactor) {
+    return Resources.createResource(
+        roundUp((long) (r.getMemorySize() * by[0] + 0.5),
+            stepFactor.getMemorySize()));
+  }
+
+  @Override
   public Resource multiplyAndNormalizeDown(Resource r, double by,
       Resource stepFactor) {
     return Resources.createResource(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index d64f03e..edd3415 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -496,6 +496,27 @@ public class DominantResourceCalculator 
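
The DominantResourceCalculator hunk above is cut off by the archive, but the commit message states the intent: computeFixpointAllocation must derive normalizedGuarantee per resource type once queues are configured with absolute resources rather than percentages. A simplified, hypothetical illustration of that per-dimension normalization (a sketch, not the YARN implementation):

import java.util.Arrays;

// Sketch only: express a queue's absolute guarantee as a fraction of the
// partition total, independently for every resource dimension.
public final class NormalizedGuaranteeSketch {

  static double[] normalizedGuarantee(long[] guaranteed, long[] total) {
    double[] norm = new double[guaranteed.length];
    for (int i = 0; i < guaranteed.length; i++) {
      // Guard against an empty partition to avoid division by zero.
      norm[i] = total[i] == 0 ? 0.0 : (double) guaranteed[i] / total[i];
    }
    return norm;
  }

  public static void main(String[] args) {
    // Queue guaranteed <4096 MB, 4 vcores> of a <16384 MB, 8 vcores> total.
    System.out.println(Arrays.toString(
        normalizedGuarantee(new long[] {4096L, 4L}, new long[] {16384L, 8L})));
    // -> [0.25, 0.5]
  }
}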

[hadoop] branch branch-2 updated: HDFS-14979 Allow Balancer to submit getBlocks calls to Observer Nodes when possible. Contributed by Erik Krogen.

2019-11-15 Thread xkrogen
This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 792e2ae  HDFS-14979 Allow Balancer to submit getBlocks calls to 
Observer Nodes when possible. Contributed by Erik Krogen.
792e2ae is described below

commit 792e2aecb3d81ce478f2a44350e4a64351967e52
Author: Erik Krogen 
AuthorDate: Mon Nov 11 14:32:51 2019 -0800

HDFS-14979 Allow Balancer to submit getBlocks calls to Observer Nodes when 
possible. Contributed by Erik Krogen.

(cherry picked from 586defe7113ed246ed0275bb3833882a3d873d70)
(cherry picked from dec765b329d3947f30273c0e7f0c4eb607ec42c9)
(cherry picked from 5cf36aa2b19aad8677e1a7553ff1f05805f772b7)
---
 .../hdfs/server/protocol/NamenodeProtocol.java |  2 ++
 .../balancer/TestBalancerWithHANameNodes.java  | 22 ++
 2 files changed, 24 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
index ccdf516c..f218e8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
@@ -76,6 +77,7 @@ public interface NamenodeProtocol {
    *                                   datanode does not exist
    */
   @Idempotent
+  @ReadOnly
   public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
       throws IOException;
 
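Context for the one-line change above: ObserverReadProxyProvider consults this annotation when deciding which RPCs are safe to send to an Observer NameNode. A minimal reflective check in the same spirit (hypothetical helper, not HDFS code; the real proxy provider also handles state alignment and failover):

import java.lang.reflect.Method;

import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;

final class ObserverRoutingSketch {
  // With getBlocks now marked @ReadOnly, a check like this is enough to
  // decide that the call may be served by an Observer instead of the Active.
  static boolean mayRouteToObserver(Method method) {
    return method.isAnnotationPresent(ReadOnly.class);
  }
}
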
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 4a398db..779e19c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -18,8 +18,14 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,9 +38,13 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
+import org.mockito.Matchers;
 import org.junit.Test;
 
 /**
@@ -128,12 +138,24 @@ public class TestBalancerWithHANameNodes {
       cluster = qjmhaCluster.getDfsCluster();
       cluster.waitClusterUp();
       cluster.waitActive();
+      List<FSNamesystem> namesystemSpies = new ArrayList<>();
+      for (int i = 0; i < cluster.getNumNameNodes(); i++) {
+        namesystemSpies.add(
+            NameNodeAdapter.spyOnNamesystem(cluster.getNameNode(i)));
+      }
 
       DistributedFileSystem dfs = HATestUtil.configureObserverReadFs(
           cluster, conf, ObserverReadProxyProvider.class, true);
       client = dfs.getClient().getNamenode();
 
       doTest(conf);
+      for (int i = 0; i < cluster.getNumNameNodes(); i++) {
+        // First observer node is at idx 2 so it should get both getBlocks
+        // calls; all other NameNodes should see 0 getBlocks calls.
+        int expectedCount = (i == 2) ? 2 : 0;
+        verify(namesystemSpies.get(i), times(expectedCount))
+            .getBlocks(Matchers.any(), anyLong());
+      }
     } finally {
       if (qjmhaCluster != null) {
         qjmhaCluster.shutdown();
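
The verification pattern in the test above is plain Mockito: spy a real object so calls still execute, exercise the code under test, then assert the exact invocation count on each spy. A self-contained sketch of the same pattern (toy List example, not the HDFS test):

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.ArrayList;
import java.util.List;

public final class SpyVerifySketch {
  public static void main(String[] args) {
    // Wrap a real list; calls pass through while Mockito records them.
    List<String> spied = spy(new ArrayList<String>());

    spied.add("a");
    spied.add("b");

    // Exactly two add(...) calls, mirroring times(expectedCount) above.
    verify(spied, times(2)).add(anyString());
  }
}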