[hadoop] branch trunk updated: HADOOP-17904. Test Result Not Working In Jenkins Result. (#3413). Contributed by Ayush Saxena.

2021-09-10 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4d18a2e  HADOOP-17904. Test Result Not Working In Jenkins Result. 
(#3413). Contributed by Ayush Saxena.
4d18a2e is described below

commit 4d18a2eb9990cffd59f2961d0dd4696216bb8708
Author: Ayush Saxena 
AuthorDate: Sat Sep 11 05:15:54 2021 +0530

HADOOP-17904. Test Result Not Working In Jenkins Result. (#3413). 
Contributed by Ayush Saxena.
---
 dev-support/Jenkinsfile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 80e0572..7896810 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -30,8 +30,10 @@ def publishJUnitResults() {
 boolean surefireReportsExist = findCmdExitCode == 0
 if (surefireReportsExist) {
 echo "XML files found under surefire-reports, running junit"
+// The path should be relative to WORKSPACE for the junit.
+SRC = 
"${SOURCEDIR}/**/target/surefire-reports/*.xml".replace("$WORKSPACE/","")
 try {
-junit "${SOURCEDIR}/**/target/surefire-reports/*.xml"
+junit "${SRC}"
 } catch(e) {
 echo 'junit processing: ' + e.toString()
 }

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-16188. RBF: Router to support resolving monitored namenodes with DNS (#3346) Contributed by Leon Gao

2021-09-10 Thread fengnanli
This is an automated email from the ASF dual-hosted git repository.

fengnanli pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 90bc688  HDFS-16188. RBF: Router to support resolving monitored 
namenodes with DNS (#3346) Contributed by Leon Gao
90bc688 is described below

commit 90bc688c78ae89658b08910edfb77d32415bdba8
Author: LeonGao 
AuthorDate: Fri Sep 10 16:40:08 2021 -0700

HDFS-16188. RBF: Router to support resolving monitored namenodes with DNS 
(#3346) Contributed by Leon Gao

* Router to support resolving monitored namenodes with DNS

* Style

* fix style and test failure

* Add test for NNHAServiceTarget const

* Resolve comments

* Fix test

* Comments and style

* Create a simple function to extract port

* Use LambdaTestUtils.intercept

* fix javadoc

* Trigger Build
---
 .../main/java/org/apache/hadoop/net/NetUtils.java  | 17 
 .../java/org/apache/hadoop/net/TestNetUtils.java   | 13 +++
 .../java/org/apache/hadoop/hdfs/DFSUtilClient.java | 73 ++--
 .../router/NamenodeHeartbeatService.java   | 69 +--
 .../server/federation/router/RBFConfigKeys.java|  6 ++
 .../hdfs/server/federation/router/Router.java  | 48 ++-
 .../src/main/resources/hdfs-rbf-default.xml| 20 +
 .../router/TestRouterNamenodeHeartbeat.java| 67 +++
 .../hadoop/hdfs/tools/NNHAServiceTarget.java   | 97 +++---
 .../hdfs/server/namenode/ha/TestNNHealthCheck.java | 15 
 10 files changed, 358 insertions(+), 67 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index efddc0e..028ba71 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -739,6 +739,23 @@ public class NetUtils {
   public static String getHostPortString(InetSocketAddress addr) {
 return addr.getHostName() + ":" + addr.getPort();
   }
+
+  /**
+   * Get port as integer from host port string like host:port.
+   *
+   * @param addr host + port string like host:port.
+   * @return an integer value representing the port.
+   * @throws IllegalArgumentException if the input is not in the correct 
format.
+   */
+  public static int getPortFromHostPortString(String addr)
+  throws IllegalArgumentException {
+String[] hostport = addr.split(":");
+if (hostport.length != 2) {
+  String errorMsg = "Address should be <host>:<port>, but it is " + addr;
+  throw new IllegalArgumentException(errorMsg);
+}
+return Integer.parseInt(hostport[1]);
+  }
   
   /**
* Checks if {@code host} is a local host name and return {@link InetAddress}
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index 0bf2c44..ad5d8d7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.KerberosAuthException;
 import org.apache.hadoop.security.NetUtilsTestResolver;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -766,6 +767,18 @@ public class TestNetUtils {
   }
 
   @Test
+  public void testGetPortFromHostPortString() throws Exception {
+
+assertEquals(1002, NetUtils.getPortFromHostPortString("testHost:1002"));
+
+LambdaTestUtils.intercept(IllegalArgumentException.class,
+() ->  NetUtils.getPortFromHostPortString("testHost"));
+
+LambdaTestUtils.intercept(IllegalArgumentException.class,
+() ->  NetUtils.getPortFromHostPortString("testHost:randomString"));
+  }
+
+  @Test
   public void testBindToLocalAddress() throws Exception {
 assertNotNull(NetUtils
 .bindToLocalAddress(NetUtils.getLocalInetAddress("127.0.0.1"), false));
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 6b3fa28..6f54de0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -426,38 +426,61 @@ public class DFSUtilClient {
 Collection<String> nnIds = 

[hadoop] branch trunk updated: HADOOP-17901. Performance degradation in Text.append() after HADOOP-1… (#3411)

2021-09-10 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 827e192  HADOOP-17901. Performance degradation in Text.append() after 
HADOOP-1… (#3411)
827e192 is described below

commit 827e19271a8808f1ebdc3f442899d3a70b93505e
Author: pbacsko 
AuthorDate: Sat Sep 11 01:01:37 2021 +0200

HADOOP-17901. Performance degradation in Text.append() after HADOOP-1… 
(#3411)
---
 .../hadoop-common/src/main/java/org/apache/hadoop/io/Text.java| 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 4915100..f39b1b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -268,8 +268,7 @@ public class Text extends BinaryComparable
*/
   public void append(byte[] utf8, int start, int len) {
 byte[] original = bytes;
-int capacity = Math.max(length + len, length + (length >> 1));
-if (ensureCapacity(capacity)) {
+if (ensureCapacity(length + len)) {
   System.arraycopy(original, 0, bytes, 0, length);
 }
 System.arraycopy(utf8, start, bytes, length, len);
@@ -302,7 +301,10 @@ public class Text extends BinaryComparable
*/
   private boolean ensureCapacity(final int capacity) {
 if (bytes.length < capacity) {
-  bytes = new byte[capacity];
+  // Try to expand the backing array by the factor of 1.5x
+  // (by taking the current size + diving it by half)
+  int targetSize = Math.max(capacity, bytes.length + (bytes.length >> 1));
+  bytes = new byte[targetSize];
   return true;
 }
 return false;

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-14216. NullPointerException happens in NamenodeWebHdfs. Contributed by lujie.

2021-09-10 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new edc1381  HDFS-14216. NullPointerException happens in NamenodeWebHdfs. 
Contributed by lujie.
edc1381 is described below

commit edc138186f02595847961347a27d77dea5a3607a
Author: Surendra Singh Lilhore 
AuthorDate: Thu Feb 21 20:36:34 2019 +0530

HDFS-14216. NullPointerException happens in NamenodeWebHdfs. Contributed by 
lujie.

(cherry picked from commit 92b53c40f070bbfe65c736f6f3eca721b9d227f5)
(cherry picked from commit 2e939515dfbaf26ca466c8a755cedde0ce4e9c1a)
---
 .../web/resources/NamenodeWebHdfsMethods.java  | 18 +
 .../web/resources/TestWebHdfsDataLocality.java | 23 ++
 2 files changed, 37 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index af71f9c..a6250a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -265,11 +265,21 @@ public class NamenodeWebHdfsMethods {
   for (String host : StringUtils
   .getTrimmedStringCollection(excludeDatanodes)) {
 int idx = host.indexOf(":");
-if (idx != -1) {  
-  excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
-  host.substring(0, idx), Integer.parseInt(host.substring(idx + 
1))));
+Node excludeNode = null;
+if (idx != -1) {
+  excludeNode = bm.getDatanodeManager().getDatanodeByXferAddr(
+ host.substring(0, idx), Integer.parseInt(host.substring(idx + 
1)));
 } else {
-  excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
+  excludeNode = bm.getDatanodeManager().getDatanodeByHost(host);
+}
+
+if (excludeNode != null) {
+  excludes.add(excludeNode);
+} else {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("DataNode " + host + " was requested to be excluded, "
++ "but it was not found.");
+  }
 }
   }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 759719d..61e429d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -239,6 +239,29 @@ public class TestWebHdfsDataLocality {
   }
 
   @Test
+  public void testExcludeWrongDataNode() throws Exception {
+final Configuration conf = WebHdfsTestUtil.createConf();
+final String[] racks = {RACK0};
+final String[] hosts = {"DataNode1"};
+final int nDataNodes = hosts.length;
+
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
+try {
+  cluster.waitActive();
+  final NameNode namenode = cluster.getNameNode();
+  NamenodeWebHdfsMethods.chooseDatanode(
+  namenode, "/path", PutOpParam.Op.CREATE, 0,
+  DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
+  "DataNode2", LOCALHOST, null);
+} catch (Exception e) {
+  Assert.fail("Failed to exclude DataNode2" + e.getMessage());
+} finally {
+  cluster.shutdown();
+}
+  }
+
+  @Test
   public void testChooseDatanodeBeforeNamesystemInit() throws Exception {
 NameNode nn = mock(NameNode.class);
 when(nn.getNamesystem()).thenReturn(null);

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-10872. Replace getPropsWithPrefix calls in AutoCreatedQueueTemplate (#3396)

2021-09-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 971f1b8  YARN-10872. Replace getPropsWithPrefix calls in 
AutoCreatedQueueTemplate (#3396)
971f1b8 is described below

commit 971f1b8b0a37b4d3eafdecf705afa4f684253e26
Author: Benjamin Teke 
AuthorDate: Fri Sep 10 17:32:42 2021 +0200

YARN-10872. Replace getPropsWithPrefix calls in AutoCreatedQueueTemplate 
(#3396)

Co-authored-by: Benjamin Teke 
---
 .../capacity/AbstractManagedParentQueue.java   | 37 --
 .../capacity/AutoCreatedQueueTemplate.java | 24 --
 .../capacity/CapacitySchedulerConfiguration.java   |  5 +--
 .../capacity/ConfigurationProperties.java  | 12 ++-
 .../capacity/TestConfigurationProperties.java  | 11 +++
 5 files changed, 46 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
index a9e82a6..9c16de0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
@@ -28,11 +28,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
 
 /**
  * A container class for automatically created child leaf queues.
@@ -173,44 +170,22 @@ public abstract class AbstractManagedParentQueue extends 
ParentQueue {
 return queueManagementPolicy;
   }
 
-  protected SortedMap<String, String> getConfigurationsWithPrefix
-  (SortedMap<String, String> sortedConfigs, String prefix) {
-return sortedConfigs.subMap( prefix, prefix + Character.MAX_VALUE );
-  }
-
-  protected SortedMap<String, String> sortCSConfigurations() {
-SortedMap<String, String> sortedConfigs = new TreeMap<>(
-new Comparator<String>() {
-  public int compare(String s1, String s2) {
-return s1.compareToIgnoreCase(s2);
-  }
-
-});
-
-for (final Iterator<Map.Entry<String, String>> iterator =
- csContext.getConfiguration().iterator(); iterator.hasNext(); ) {
-  final Map.Entry<String, String> confKeyValuePair = iterator.next();
-  sortedConfigs.put(confKeyValuePair.getKey(), 
confKeyValuePair.getValue());
-}
-return sortedConfigs;
-  }
-
   protected CapacitySchedulerConfiguration initializeLeafQueueConfigs(String
   configPrefix) {
 
 CapacitySchedulerConfiguration leafQueueConfigs = new
 CapacitySchedulerConfiguration(new Configuration(false), false);
 
-String prefix = YarnConfiguration.RESOURCE_TYPES + ".";
 Map<String, String> rtProps = csContext
-.getConfiguration().getPropsWithPrefix(prefix);
+.getConfiguration().getConfigurationProperties()
+.getPropertiesWithPrefix(YarnConfiguration.RESOURCE_TYPES + ".", true);
 for (Map.Entry<String, String> entry : rtProps.entrySet()) {
-  leafQueueConfigs.set(prefix + entry.getKey(), entry.getValue());
+  leafQueueConfigs.set(entry.getKey(), entry.getValue());
 }
 
-SortedMap<String, String> sortedConfigs = sortCSConfigurations();
-SortedMap<String, String> templateConfigs = getConfigurationsWithPrefix
-(sortedConfigs, configPrefix);
+Map<String, String> templateConfigs = csContext
+.getConfiguration().getConfigurationProperties()
+.getPropertiesWithPrefix(configPrefix, true);
 
 for (final Iterator<Map.Entry<String, String>> iterator =
  templateConfigs.entrySet().iterator(); iterator.hasNext(); ) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java
index cb0f789..eff8c4e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java
+++ 

[hadoop] branch trunk updated (b229e5a -> 811fd23)

2021-09-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from b229e5a  YARN-10910. AbstractCSQueue#setupQueueConfigs: Separate 
validation logic from initialization logic (#3407)
 add 811fd23  YARN-10852. Optimise CSConfiguration 
getAllUserWeightsForQueue (#3392)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/conf/Configuration.java |  2 +-
 .../capacity/CapacitySchedulerConfiguration.java   | 34 +++---
 .../scheduler/capacity/TestLeafQueue.java  |  1 +
 3 files changed, 25 insertions(+), 12 deletions(-)

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (29a6f14 -> b229e5a)

2021-09-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 29a6f14  YARN-10914. Simplify duplicated code for tracking 
ResourceUsage in AbstractCSQueue (#3402)
 add b229e5a  YARN-10910. AbstractCSQueue#setupQueueConfigs: Separate 
validation logic from initialization logic (#3407)

No new revisions were added by this update.

Summary of changes:
 .../scheduler/capacity/AbstractCSQueue.java| 215 +++--
 1 file changed, 113 insertions(+), 102 deletions(-)

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-10914. Simplify duplicated code for tracking ResourceUsage in AbstractCSQueue (#3402)

2021-09-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 29a6f14  YARN-10914. Simplify duplicated code for tracking 
ResourceUsage in AbstractCSQueue (#3402)
29a6f14 is described below

commit 29a6f141d4bcc5307d2ac72c742f87611d7092d5
Author: Tamas Domok 
AuthorDate: Fri Sep 10 15:57:46 2021 +0200

YARN-10914. Simplify duplicated code for tracking ResourceUsage in 
AbstractCSQueue (#3402)

Co-authored-by: Tamas Domok 
---
 .../scheduler/capacity/AbstractCSQueue.java| 97 ++
 1 file changed, 43 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 1f5820d..2da52d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -73,6 +73,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -1189,84 +1190,72 @@ public abstract class AbstractCSQueue implements 
CSQueue {
 
   }
 
+  private static String ensurePartition(String partition) {
+return Optional.ofNullable(partition).orElse(RMNodeLabelsManager.NO_LABEL);
+  }
+
+  @FunctionalInterface
+  interface Counter {
+void count(String partition, Resource resource);
+  }
+
+  @FunctionalInterface
+  interface CounterWithApp {
+void count(String partition, Resource reservedRes, 
SchedulerApplicationAttempt application);
+  }
+
+  private void count(String partition, Resource resource, Counter counter, 
Counter parentCounter) {
+final String checkedPartition = ensurePartition(partition);
+counter.count(checkedPartition, resource);
+Optional.ofNullable(parentCounter).ifPresent(c -> 
c.count(checkedPartition, resource));
+  }
+
+  private void countAndUpdate(String partition, Resource resource,
+  Counter counter, CounterWithApp parentCounter) {
+final String checkedPartition = ensurePartition(partition);
+counter.count(checkedPartition, resource);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(checkedPartition, Resources.none()),
+checkedPartition, this);
+Optional.ofNullable(parentCounter).ifPresent(c -> 
c.count(checkedPartition, resource, null));
+  }
+
   @Override
   public void incReservedResource(String partition, Resource reservedRes) {
-if (partition == null) {
-  partition = RMNodeLabelsManager.NO_LABEL;
-}
-
-queueUsage.incReserved(partition, reservedRes);
-if(null != parent){
-  parent.incReservedResource(partition, reservedRes);
-}
+count(partition, reservedRes, queueUsage::incReserved,
+parent == null ? null : parent::incReservedResource);
   }
 
   @Override
   public void decReservedResource(String partition, Resource reservedRes) {
-if (partition == null) {
-  partition = RMNodeLabelsManager.NO_LABEL;
-}
-
-queueUsage.decReserved(partition, reservedRes);
-if(null != parent){
-  parent.decReservedResource(partition, reservedRes);
-}
+count(partition, reservedRes, queueUsage::decReserved,
+parent == null ? null : parent::decReservedResource);
   }
 
   @Override
   public void incPendingResource(String nodeLabel, Resource resourceToInc) {
-if (nodeLabel == null) {
-  nodeLabel = RMNodeLabelsManager.NO_LABEL;
-}
-// ResourceUsage has its own lock, no addition lock needs here.
-queueUsage.incPending(nodeLabel, resourceToInc);
-if (null != parent) {
-  parent.incPendingResource(nodeLabel, resourceToInc);
-}
+count(nodeLabel, resourceToInc, queueUsage::incPending,
+parent == null ? null : parent::incPendingResource);
   }
 
   @Override
   public void decPendingResource(String nodeLabel, Resource resourceToDec) {
-if (nodeLabel == null) {
-  nodeLabel = RMNodeLabelsManager.NO_LABEL;
-}
-// ResourceUsage has its own lock, no addition lock needs here.
-queueUsage.decPending(nodeLabel, resourceToDec);
-if (null != parent) {
-  parent.decPendingResource(nodeLabel, resourceToDec);
-}
+