[hadoop] branch trunk updated: HDDS-1334. Fix asf license errors in newly added files by HDDS-1234. Contributed by Aravindan Vijayan.

2019-03-25 Thread yqlin
This is an automated email from the ASF dual-hosted git repository.

yqlin pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c99b107  HDDS-1334. Fix asf license errors in newly added files by 
HDDS-1234. Contributed by Aravindan Vijayan.
c99b107 is described below

commit c99b107772f4a52832bafd3a4c23fdef8015fdea
Author: Yiqun Lin 
AuthorDate: Tue Mar 26 11:51:04 2019 +0800

HDDS-1334. Fix asf license errors in newly added files by HDDS-1234. 
Contributed by Aravindan Vijayan.
---
 .../ozone/recon/AbstractOMMetadataManagerTest.java | 18 ++
 1 file changed, 18 insertions(+)

diff --git 
a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
 
b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
index b58e225..d115891 100644
--- 
a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
+++ 
b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.ozone.recon;
 
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14359. Inherited ACL permissions masked when parent directory does not exist (mkdir -p) (Contributed by Stephen O'Donnell via Daniel Templeton)

2019-03-25 Thread templedf
This is an automated email from the ASF dual-hosted git repository.

templedf pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3f6d6d2  HDFS-14359. Inherited ACL permissions masked when parent 
directory does not exist (mkdir -p) (Contributed by Stephen O'Donnell via 
Daniel Templeton)
3f6d6d2 is described below

commit 3f6d6d28119049b003cb81735ce675e52d0d2104
Author: Stephen O'Donnell 
AuthorDate: Mon Mar 25 16:16:13 2019 -0700

HDFS-14359. Inherited ACL permissions masked when parent directory does not 
exist (mkdir -p)
(Contributed by Stephen O'Donnell via Daniel Templeton)

Change-Id: Ia83f799a8f56aa8057a967b234f184683395fa41
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 18 +++--
 .../hadoop/hdfs/server/namenode/FSAclBaseTest.java |  2 +-
 .../testAclCLIWithPosixAclInheritance.xml  | 77 ++
 3 files changed, 92 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 2f0a0fc..95e8898 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.permission.FsCreateModes;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
@@ -187,10 +188,19 @@ class FSDirMkdirOp {
   private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm,
   PermissionStatus perm) {
 FsPermission p = parentPerm.getPermission();
-FsPermission ancestorPerm = new FsPermission(
-p.getUserAction().or(FsAction.WRITE_EXECUTE),
-p.getGroupAction(),
-p.getOtherAction());
+FsPermission ancestorPerm;
+if (p.getUnmasked() == null) {
+  ancestorPerm = new FsPermission(
+  p.getUserAction().or(FsAction.WRITE_EXECUTE),
+  p.getGroupAction(),
+  p.getOtherAction());
+} else {
+  ancestorPerm = FsCreateModes.create(
+  new FsPermission(
+p.getUserAction().or(FsAction.WRITE_EXECUTE),
+p.getGroupAction(),
+p.getOtherAction()), p.getUnmasked());
+}
 return new PermissionStatus(perm.getUserName(), perm.getGroupName(),
 ancestorPerm);
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index ee92217..fd50648 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -1150,7 +1150,7 @@ public abstract class FSAclBaseTest {
 AclStatus s = fs.getAclStatus(dirPath);
 AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
 assertArrayEquals(expected, returned);
-assertPermission(dirPath, (short)010750);
+assertPermission(dirPath, (short)010770);
 assertAclFeature(dirPath, true);
 s = fs.getAclStatus(subdirPath);
 returned = s.getEntries().toArray(new AclEntry[0]);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
index 7e9ace1..9ff71b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLIWithPosixAclInheritance.xml
@@ -741,6 +741,83 @@
   
 
 
+  
+  setfacl : check inherit default ACL to ancestor dir with 
mkdir -p
+  
+-fs NAMENODE -mkdir /dir1
+-fs NAMENODE -setfacl -m 
default:user:charlie:r-x,default:group:admin:rwx /dir1
+-fs NAMENODE -mkdir -p /dir1/dir2/dir3
+-fs NAMENODE -getfacl /dir1/dir2
+  
+  
+-fs NAMENODE -rm -R /dir1
+  
+  
+
+  SubstringComparator
+  # file: /dir1/dir2
+
+
+  SubstringComparator
+  # owner: USERNAME
+
+
+  SubstringComparator
+  # group: supergroup
+
+
+  SubstringComparator
+  user::rwx
+
+
+  
+  RegexpComparator
+  ^user:charlie:r-x$
+
+
+  SubstringComparator
+  group::r-x
+
+
+  
+  

[hadoop] branch YARN-8200.branch3 updated (6eefb78 -> 3a121a7)

2019-03-25 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a change to branch YARN-8200.branch3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 discard 6eefb78  YARN-9272

This update removed existing revisions from the reference, leaving the
reference pointing at a previous point in the repository history.

 * -- * -- N   refs/heads/YARN-8200.branch3 (3a121a7)
\
 O -- O -- O   (6eefb78)

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

No new revisions were added by this update.

Summary of changes:
 .../yarn/api/records/ResourceInformation.java  |   6 -
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  40 ++---
 .../yarn/server/resourcemanager/AdminService.java  |  22 +--
 .../scheduler/capacity/CapacityScheduler.java  |  10 +-
 .../capacity/CapacitySchedulerConfiguration.java   |  22 ++-
 .../scheduler/capacity/LeafQueue.java  |   4 +-
 .../yarn/server/resourcemanager/RMHATestBase.java  |  29 +---
 .../scheduler/capacity/TestCapacityScheduler.java  |  11 +-
 ...estCapacitySchedulerWithMultiResourceTypes.java | 190 -
 9 files changed, 42 insertions(+), 292 deletions(-)
 delete mode 100644 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9405. Fixed flaky tests in TestYarnNativeServices. Contributed by Prabhu Joseph

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 710cbc9  YARN-9405.  Fixed flaky tests in TestYarnNativeServices.  
   Contributed by Prabhu Joseph
710cbc9 is described below

commit 710cbc9bd649123cb0f742e4a91a6a216cb1ac76
Author: Eric Yang 
AuthorDate: Mon Mar 25 16:34:04 2019 -0400

YARN-9405.  Fixed flaky tests in TestYarnNativeServices.
Contributed by Prabhu Joseph
---
 .../hadoop/yarn/service/ServiceTestUtils.java  |  6 ++
 .../yarn/service/TestYarnNativeServices.java   | 22 --
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
index b3ba58d..a37ec75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
@@ -535,6 +535,12 @@ public class ServiceTestUtils {
 waitForServiceToBeInState(client, exampleApp, ServiceState.STARTED);
   }
 
+  protected void waitForServiceToBeExpressUpgrading(ServiceClient client,
+  Service exampleApp) throws TimeoutException, InterruptedException {
+waitForServiceToBeInState(client, exampleApp,
+ServiceState.EXPRESS_UPGRADING);
+  }
+
   protected void waitForServiceToBeInState(ServiceClient client,
   Service exampleApp, ServiceState desiredState) throws TimeoutException,
   InterruptedException {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index a22ada4..6c38511 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -439,6 +439,8 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 component2.getConfiguration().getEnv().put("key2", "val2");
 client.actionUpgradeExpress(service);
 
+waitForServiceToBeExpressUpgrading(client, service);
+
 // wait for upgrade to complete
 waitForServiceToBeStable(client, service);
 Service active = client.getStatus(service.getName());
@@ -859,16 +861,32 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
   private void checkCompInstancesInOrder(ServiceClient client,
   Service exampleApp) throws IOException, YarnException,
   TimeoutException, InterruptedException {
+waitForContainers(client, exampleApp);
 Service service = client.getStatus(exampleApp.getName());
 for (Component comp : service.getComponents()) {
   checkEachCompInstancesInOrder(comp, exampleApp.getName());
 }
   }
 
+  private void waitForContainers(ServiceClient client, Service exampleApp)
+  throws TimeoutException, InterruptedException {
+GenericTestUtils.waitFor(() -> {
+  try {
+Service service = client.getStatus(exampleApp.getName());
+for (Component comp : service.getComponents()) {
+  if (comp.getContainers().size() != comp.getNumberOfContainers()) {
+return false;
+  }
+}
+return true;
+  } catch (Exception e) {
+return false;
+  }
+}, 2000, 20);
+  }
+
   private void checkEachCompInstancesInOrder(Component component, String
   serviceName) throws TimeoutException, InterruptedException {
-long expectedNumInstances = component.getNumberOfContainers();
-Assert.assertEquals(expectedNumInstances, 
component.getContainers().size());
 TreeSet instances = new TreeSet<>();
 for (Container container : component.getContainers()) {
   instances.add(container.getComponentInstanceName());


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9268. General improvements in FpgaDevice. Contributed by Peter Bacsko.

2019-03-25 Thread devaraj
This is an automated email from the ASF dual-hosted git repository.

devaraj pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new eeda689  YARN-9268. General improvements in FpgaDevice. Contributed by 
Peter Bacsko.
eeda689 is described below

commit eeda6891e49bc13ae86d0193f94238b7109e291d
Author: Devaraj K 
AuthorDate: Mon Mar 25 13:22:53 2019 -0700

YARN-9268. General improvements in FpgaDevice. Contributed by Peter Bacsko.
---
 .../resources/fpga/FpgaResourceAllocator.java  | 133 +
 .../fpga/AoclDiagnosticOutputParser.java   |   4 +-
 .../resourceplugin/fpga/FpgaDiscoverer.java|   2 +-
 .../fpga/discovery/DeviceSpecParser.java   |   7 +-
 .../resources/fpga/TestFpgaResourceHandler.java|  32 ++---
 .../resourceplugin/fpga/TestAoclOutputParser.java  |  24 +---
 .../resourceplugin/fpga/TestFpgaDiscoverer.java|  17 ++-
 7 files changed, 88 insertions(+), 131 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
index e5622f9..b64ffd0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
@@ -21,6 +21,7 @@ package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,7 +52,7 @@ public class FpgaResourceAllocator {
   //key is resource type of FPGA, vendor plugin supported ID
   private LinkedHashMap> availableFpga = new 
LinkedHashMap<>();
 
-  //key is requetor, aka. container ID
+  //key is requestor, aka. container ID
   private LinkedHashMap> usedFpgaByRequestor = new 
LinkedHashMap<>();
 
   private Context nmContext;
@@ -133,35 +134,33 @@ public class FpgaResourceAllocator {
 }
   }
 
-  public static class FpgaDevice implements Comparable, 
Serializable {
+  /** A class that represents an FPGA card. */
+  public static class FpgaDevice implements Serializable {
+private static final long serialVersionUID = -4678487141824092751L;
+private final String type;
+private final int major;
+private final int minor;
 
-private static final long serialVersionUID = 1L;
+// the alias device name. Intel use acl number acl0 to acl31
+private final String aliasDevName;
 
-private String type;
-private Integer major;
-private Integer minor;
-// IP file identifier. matrix multiplication for instance
+// IP file identifier. matrix multiplication for instance (mutable)
 private String IPID;
-// SHA-256 hash of the uploaded aocx file
+// SHA-256 hash of the uploaded aocx file (mutable)
 private String aocxHash;
-// the device name under /dev
-private String devName;
-// the alias device name. Intel use acl number acl0 to acl31
-private String aliasDevName;
-// lspci output's bus number: 02:00.00 (bus:slot.func)
-private String busNum;
-private String temperature;
-private String cardPowerUsage;
+
+// cached hash value
+private Integer hashCode;
 
 public String getType() {
   return type;
 }
 
-public Integer getMajor() {
+public int getMajor() {
   return major;
 }
 
-public Integer getMinor() {
+public int getMinor() {
   return minor;
 }
 
@@ -181,57 +180,16 @@ public class FpgaResourceAllocator {
   this.IPID = IPID;
 }
 
-public String getDevName() {
-  return devName;
-}
-
-public void setDevName(String devName) {
-  this.devName = devName;
-}
-
 public String getAliasDevName() {
   return aliasDevName;
 }
 
-public void setAliasDevName(String aliasDevName) {
-  this.aliasDevName = aliasDevName;
-}
-
-public String getBusNum() {
-  return busNum;
-}
-
-public void setBusNum(String busNum) {
-  this.busNum = busNum;
-}
-
-public String getTemperature() {
-  return temperature;
-}
-
-public String getCardPowerUsage() {
-  return cardPowerUsage;
-}
-
-public FpgaDevice(String type, Integer major, Integer 

[hadoop] branch trunk updated: HDDS-1217. Refactor ChillMode rules and chillmode manager. (#558)

2019-03-25 Thread nanda
This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8739693  HDDS-1217. Refactor ChillMode rules and chillmode manager. 
(#558)
8739693 is described below

commit 8739693514ac92c33b38e472c37b7dcf4febe73f
Author: Bharat Viswanadham 
AuthorDate: Mon Mar 25 13:11:03 2019 -0700

HDDS-1217. Refactor ChillMode rules and chillmode manager. (#558)
---
 .../hdds/scm/chillmode/ChillModeExitRule.java  |  88 +-
 .../hdds/scm/chillmode/ContainerChillModeRule.java |  70 
 .../hdds/scm/chillmode/DataNodeChillModeRule.java  |  60 +++
 .../chillmode/HealthyPipelineChillModeRule.java| 114 +++--
 .../chillmode/OneReplicaPipelineChillModeRule.java |  83 +
 .../hdds/scm/chillmode/SCMChillModeManager.java|  92 +++---
 .../scm/chillmode/TestSCMChillModeManager.java | 190 -
 7 files changed, 497 insertions(+), 200 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
index d283dfe..0c9b823 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModeExitRule.java
@@ -17,16 +17,94 @@
  */
 package org.apache.hadoop.hdds.scm.chillmode;
 
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
+
 /**
- * Interface for defining chill mode exit rules.
+ * Abstract class for ChillModeExitRules. When a new rule is added, the new
+ * rule should extend this abstract class.
+ *
+ * Each rule Should do:
+ * 1. Should add a handler for the event it is looking for during the
+ * initialization of the rule.
+ * 2. Add the rule in ScmChillModeManager to list of the rules.
+ *
  *
  * @param 
  */
-public interface ChillModeExitRule {
+public abstract class ChillModeExitRule implements EventHandler {
+
+  private final SCMChillModeManager chillModeManager;
+  private final String ruleName;
+
+  public ChillModeExitRule(SCMChillModeManager chillModeManager,
+  String ruleName, EventQueue eventQueue) {
+this.chillModeManager = chillModeManager;
+this.ruleName = ruleName;
+eventQueue.addHandler(getEventType(), this);
+  }
+
+  /**
+   * Return's the name of this ChillModeExit Rule.
+   * @return ruleName
+   */
+  public String getRuleName() {
+return ruleName;
+  }
+
+  /**
+   * Return's the event type this chillMode exit rule handles.
+   * @return TypedEvent
+   */
+  protected abstract TypedEvent getEventType();
+
+  /**
+   * Validate's this rule. If this rule condition is met, returns true, else
+   * returns false.
+   * @return boolean
+   */
+  protected abstract boolean validate();
+
+  /**
+   * Actual processing logic for this rule.
+   * @param report
+   */
+  protected abstract void process(T report);
+
+  /**
+   * Cleanup action's need to be done, once this rule is satisfied.
+   */
+  protected abstract void cleanup();
+
+  @Override
+  public final void onMessage(T report, EventPublisher publisher) {
+
+// TODO: when we have remove handlers, we can remove getInChillmode check
+
+if (scmInChillMode()) {
+  if (validate()) {
+chillModeManager.validateChillModeExitRules(ruleName, publisher);
+cleanup();
+return;
+  }
+
+  process(report);
 
-  boolean validate();
+  if (validate()) {
+chillModeManager.validateChillModeExitRules(ruleName, publisher);
+cleanup();
+  }
+}
+  }
 
-  void process(T report);
+  /**
+   * Return true if SCM is in chill mode, else false.
+   * @return boolean
+   */
+  protected boolean scmInChillMode() {
+return chillModeManager.getInChillMode();
+  }
 
-  void cleanup();
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ContainerChillModeRule.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ContainerChillModeRule.java
index 17dd496..cd08786 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ContainerChillModeRule.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ContainerChillModeRule.java
@@ -22,22 +22,24 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import 

[hadoop] branch branch-3.1 updated: YARN-9391. Fixed node manager environment leaks into Docker containers. Contributed by Jim Brennan

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new dbc02bc  YARN-9391.  Fixed node manager environment leaks into Docker 
containers. Contributed by Jim Brennan
dbc02bc is described below

commit dbc02bcda7ab8db8a27a4f94391e5337af59a2be
Author: Eric Yang 
AuthorDate: Mon Mar 25 15:53:24 2019 -0400

YARN-9391.  Fixed node manager environment leaks into Docker containers.
Contributed by Jim Brennan

(cherry picked from commit 3c45762a0bfb403e069a03e30d35dd11432ee8b0)
---
 .../hadoop/yarn/server/nodemanager/ContainerExecutor.java  | 10 --
 .../containermanager/launcher/TestContainerLaunch.java |  5 +
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 9714731..dbee048 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -403,16 +403,6 @@ public abstract class ContainerExecutor implements 
Configurable {
   sb.env(env.getKey(), env.getValue());
 }
   }
-  // Add the whitelist vars to the environment.  Do this after writing
-  // environment variables so they are not written twice.
-  for(String var : whitelistVars) {
-if (!environment.containsKey(var)) {
-  String val = getNMEnvVar(var);
-  if (val != null) {
-environment.put(var, val);
-  }
-}
-  }
 }
 
 if (resources != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index ab5d47e..9cfa6a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -468,10 +468,15 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 for (String envVar : env.keySet()) {
   Assert.assertTrue(shellContent.contains(envVar + "="));
 }
+// The whitelist vars should not have been added to env
+// They should only be in the launch script
 for (String wlVar : whitelistVars) {
+  Assert.assertFalse(env.containsKey(wlVar));
   Assert.assertTrue(shellContent.contains(wlVar + "="));
 }
+// Non-whitelist nm vars should be in neither env nor in launch script
 for (String nwlVar : nonWhiteListEnv) {
+  Assert.assertFalse(env.containsKey(nwlVar));
   Assert.assertFalse(shellContent.contains(nwlVar + "="));
 }
 // Explicitly Set NM vars should be before user vars


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9391. Fixed node manager environment leaks into Docker containers. Contributed by Jim Brennan

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 10642a6  YARN-9391.  Fixed node manager environment leaks into Docker 
containers. Contributed by Jim Brennan
10642a6 is described below

commit 10642a6205f8f2189eef56836a3f9208da4be8cb
Author: Eric Yang 
AuthorDate: Mon Mar 25 15:53:24 2019 -0400

YARN-9391.  Fixed node manager environment leaks into Docker containers.
Contributed by Jim Brennan

(cherry picked from commit 3c45762a0bfb403e069a03e30d35dd11432ee8b0)
---
 .../hadoop/yarn/server/nodemanager/ContainerExecutor.java  | 10 --
 .../containermanager/launcher/TestContainerLaunch.java |  5 +
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 98cc2a4..3fa7321 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -406,16 +406,6 @@ public abstract class ContainerExecutor implements 
Configurable {
   sb.env(env.getKey(), env.getValue());
 }
   }
-  // Add the whitelist vars to the environment.  Do this after writing
-  // environment variables so they are not written twice.
-  for(String var : whitelistVars) {
-if (!environment.containsKey(var)) {
-  String val = getNMEnvVar(var);
-  if (val != null) {
-environment.put(var, val);
-  }
-}
-  }
 }
 
 if (resources != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 1f7df56..b240f88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -468,10 +468,15 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 for (String envVar : env.keySet()) {
   Assert.assertTrue(shellContent.contains(envVar + "="));
 }
+// The whitelist vars should not have been added to env
+// They should only be in the launch script
 for (String wlVar : whitelistVars) {
+  Assert.assertFalse(env.containsKey(wlVar));
   Assert.assertTrue(shellContent.contains(wlVar + "="));
 }
+// Non-whitelist nm vars should be in neither env nor in launch script
 for (String nwlVar : nonWhiteListEnv) {
+  Assert.assertFalse(env.containsKey(nwlVar));
   Assert.assertFalse(shellContent.contains(nwlVar + "="));
 }
 // Explicitly Set NM vars should be before user vars


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9391. Fixed node manager environment leaks into Docker containers. Contributed by Jim Brennan

2019-03-25 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c45762  YARN-9391.  Fixed node manager environment leaks into Docker 
containers. Contributed by Jim Brennan
3c45762 is described below

commit 3c45762a0bfb403e069a03e30d35dd11432ee8b0
Author: Eric Yang 
AuthorDate: Mon Mar 25 15:53:24 2019 -0400

YARN-9391.  Fixed node manager environment leaks into Docker containers.
Contributed by Jim Brennan
---
 .../hadoop/yarn/server/nodemanager/ContainerExecutor.java  | 10 --
 .../containermanager/launcher/TestContainerLaunch.java |  5 +
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 61e4364..55836c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -431,16 +431,6 @@ public abstract class ContainerExecutor implements 
Configurable {
   sb.env(env.getKey(), env.getValue());
 }
   }
-  // Add the whitelist vars to the environment.  Do this after writing
-  // environment variables so they are not written twice.
-  for(String var : whitelistVars) {
-if (!environment.containsKey(var)) {
-  String val = getNMEnvVar(var);
-  if (val != null) {
-environment.put(var, val);
-  }
-}
-  }
 }
 
 if (resources != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index c1f4268..e048577 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -476,10 +476,15 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 for (String envVar : env.keySet()) {
   Assert.assertTrue(shellContent.contains(envVar + "="));
 }
+// The whitelist vars should not have been added to env
+// They should only be in the launch script
 for (String wlVar : whitelistVars) {
+  Assert.assertFalse(env.containsKey(wlVar));
   Assert.assertTrue(shellContent.contains(wlVar + "="));
 }
+// Non-whitelist nm vars should be in neither env nor in launch script
 for (String nwlVar : nonWhiteListEnv) {
+  Assert.assertFalse(env.containsKey(nwlVar));
   Assert.assertFalse(shellContent.contains(nwlVar + "="));
 }
 // Explicitly Set NM vars should be before user vars


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch YARN-8200.branch3 updated (e628d62 -> 6eefb78)

2019-03-25 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a change to branch YARN-8200.branch3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 discard e628d62  YARN-9272
 new 6eefb78  YARN-9272

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (e628d62)
\
 N -- N -- N   refs/heads/YARN-8200.branch3 (6eefb78)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../yarn/server/resourcemanager/RMHATestBase.java  | 29 --
 1 file changed, 27 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/01: YARN-9272

2019-03-25 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200.branch3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6eefb781d713a978c14b6e647bb1e60e177d4ca3
Author: Jonathan Hung 
AuthorDate: Mon Mar 25 12:04:30 2019 -0700

YARN-9272
---
 .../yarn/api/records/ResourceInformation.java  |   6 +
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  40 +++--
 .../yarn/server/resourcemanager/AdminService.java  |  22 ++-
 .../scheduler/capacity/CapacityScheduler.java  |  10 +-
 .../capacity/CapacitySchedulerConfiguration.java   |  22 +--
 .../scheduler/capacity/LeafQueue.java  |   4 +-
 .../yarn/server/resourcemanager/RMHATestBase.java  |  29 +++-
 .../scheduler/capacity/TestCapacityScheduler.java  |  11 +-
 ...estCapacitySchedulerWithMultiResourceTypes.java | 190 +
 9 files changed, 292 insertions(+), 42 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 67592cc..a4c1f6c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -225,6 +225,12 @@ public class ResourceInformation implements 
Comparable {
 Long.MAX_VALUE);
   }
 
+  public static ResourceInformation newInstance(String name, String units,
+  long minRes, long maxRes) {
+return ResourceInformation.newInstance(name, units, 0L,
+ResourceTypes.COUNTABLE, minRes, maxRes);
+  }
+
   public static ResourceInformation newInstance(String name, long value) {
 return ResourceInformation
 .newInstance(name, "", value, ResourceTypes.COUNTABLE, 0L,
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index b945183..6e8eb81 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -201,9 +201,23 @@ public class ResourceUtils {
 }
   }
 
-  @VisibleForTesting
-  static void initializeResourcesMap(Configuration conf) {
+  /**
+   * Get maximum allocation from config, *THIS WILL NOT UPDATE INTERNAL DATA*
+   * @param conf config
+   * @return maximum allocation
+   */
+  public static Resource fetchMaximumAllocationFromConfig(Configuration conf) {
+Map resourceInformationMap =
+getResourceInformationMapFromConfig(conf);
+Resource ret = Resource.newInstance(0, 0);
+for (ResourceInformation entry : resourceInformationMap.values()) {
+  ret.setResourceValue(entry.getName(), entry.getMaximumAllocation());
+}
+return ret;
+  }
 
+  private static Map 
getResourceInformationMapFromConfig(
+  Configuration conf) {
 Map resourceInformationMap = new HashMap<>();
 String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES);
 
@@ -249,6 +263,13 @@ public class ResourceUtils {
 
 setAllocationForMandatoryResources(resourceInformationMap, conf);
 
+return resourceInformationMap;
+  }
+
+  @VisibleForTesting
+  static void initializeResourcesMap(Configuration conf) {
+Map resourceInformationMap =
+getResourceInformationMapFromConfig(conf);
 initializeResourcesFromResourceInformationMap(resourceInformationMap);
   }
 
@@ -546,19 +567,8 @@ public class ResourceUtils {
   public static Resource getResourceTypesMaximumAllocation() {
 Resource ret = Resource.newInstance(0, 0);
 for (ResourceInformation entry : resourceTypesArray) {
-  String name = entry.getName();
-  if (name.equals(ResourceInformation.MEMORY_MB.getName())) {
-ret.setMemorySize(entry.getMaximumAllocation());
-  } else if (name.equals(ResourceInformation.VCORES.getName())) {
-Long tmp = entry.getMaximumAllocation();
-if (tmp > Integer.MAX_VALUE) {
-  tmp = (long) Integer.MAX_VALUE;
-}
-ret.setVirtualCores(tmp.intValue());
-continue;
-  } else {
-ret.setResourceValue(name, entry.getMaximumAllocation());
-  }
+  ret.setResourceValue(entry.getName(),
+  entry.getMaximumAllocation());
 }
 return ret;
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 

[hadoop] branch YARN-8200.branch3 updated: YARN-9272

2019-03-25 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch YARN-8200.branch3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/YARN-8200.branch3 by this push:
 new e628d62  YARN-9272
e628d62 is described below

commit e628d6286f51e89035aa33d1db5b89bde1572453
Author: Jonathan Hung 
AuthorDate: Thu Mar 21 13:10:30 2019 -0700

YARN-9272
---
 .../yarn/api/records/ResourceInformation.java  |   6 +
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  40 +++--
 .../yarn/server/resourcemanager/AdminService.java  |  22 ++-
 .../scheduler/capacity/CapacityScheduler.java  |  10 +-
 .../capacity/CapacitySchedulerConfiguration.java   |  22 +--
 .../scheduler/capacity/LeafQueue.java  |   4 +-
 .../scheduler/capacity/TestCapacityScheduler.java  |  11 +-
 ...estCapacitySchedulerWithMultiResourceTypes.java | 190 +
 8 files changed, 265 insertions(+), 40 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 67592cc..a4c1f6c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -225,6 +225,12 @@ public class ResourceInformation implements 
Comparable {
 Long.MAX_VALUE);
   }
 
+  public static ResourceInformation newInstance(String name, String units,
+  long minRes, long maxRes) {
+return ResourceInformation.newInstance(name, units, 0L,
+ResourceTypes.COUNTABLE, minRes, maxRes);
+  }
+
   public static ResourceInformation newInstance(String name, long value) {
 return ResourceInformation
 .newInstance(name, "", value, ResourceTypes.COUNTABLE, 0L,
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index b945183..6e8eb81 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -201,9 +201,23 @@ public class ResourceUtils {
 }
   }
 
-  @VisibleForTesting
-  static void initializeResourcesMap(Configuration conf) {
+  /**
+   * Get maximum allocation from config, *THIS WILL NOT UPDATE INTERNAL DATA*
+   * @param conf config
+   * @return maximum allocation
+   */
+  public static Resource fetchMaximumAllocationFromConfig(Configuration conf) {
+Map resourceInformationMap =
+getResourceInformationMapFromConfig(conf);
+Resource ret = Resource.newInstance(0, 0);
+for (ResourceInformation entry : resourceInformationMap.values()) {
+  ret.setResourceValue(entry.getName(), entry.getMaximumAllocation());
+}
+return ret;
+  }
 
+  private static Map 
getResourceInformationMapFromConfig(
+  Configuration conf) {
 Map resourceInformationMap = new HashMap<>();
 String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES);
 
@@ -249,6 +263,13 @@ public class ResourceUtils {
 
 setAllocationForMandatoryResources(resourceInformationMap, conf);
 
+return resourceInformationMap;
+  }
+
+  @VisibleForTesting
+  static void initializeResourcesMap(Configuration conf) {
+Map resourceInformationMap =
+getResourceInformationMapFromConfig(conf);
 initializeResourcesFromResourceInformationMap(resourceInformationMap);
   }
 
@@ -546,19 +567,8 @@ public class ResourceUtils {
   public static Resource getResourceTypesMaximumAllocation() {
 Resource ret = Resource.newInstance(0, 0);
 for (ResourceInformation entry : resourceTypesArray) {
-  String name = entry.getName();
-  if (name.equals(ResourceInformation.MEMORY_MB.getName())) {
-ret.setMemorySize(entry.getMaximumAllocation());
-  } else if (name.equals(ResourceInformation.VCORES.getName())) {
-Long tmp = entry.getMaximumAllocation();
-if (tmp > Integer.MAX_VALUE) {
-  tmp = (long) Integer.MAX_VALUE;
-}
-ret.setVirtualCores(tmp.intValue());
-continue;
-  } else {
-ret.setResourceValue(name, entry.getMaximumAllocation());
-  }
+  ret.setResourceValue(entry.getName(),
+  entry.getMaximumAllocation());
 }
 return ret;
   }
diff --git 

[hadoop] branch trunk updated: HDDS-1234. Iterate the OM DB snapshot and populate the recon container DB. Contributed by Aravindan Vijayan.

2019-03-25 Thread yqlin
This is an automated email from the ASF dual-hosted git repository.

yqlin pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e5d72f5  HDDS-1234. Iterate the OM DB snapshot and populate the recon 
container DB. Contributed by Aravindan Vijayan.
e5d72f5 is described below

commit e5d72f504e2cf932657f96797623f3a5bbd71f4b
Author: Yiqun Lin 
AuthorDate: Mon Mar 25 22:52:02 2019 +0800

HDDS-1234. Iterate the OM DB snapshot and populate the recon container DB. 
Contributed by Aravindan Vijayan.
---
 .../apache/hadoop/utils/LevelDBStoreIterator.java  |   4 -
 .../org/apache/hadoop/utils/MetaStoreIterator.java |   5 -
 .../apache/hadoop/utils/RocksDBStoreIterator.java  |   5 -
 .../java/org/apache/hadoop/utils/db/DBStore.java   |   6 +
 .../org/apache/hadoop/utils/db/IntegerCodec.java   |  28 ++-
 .../java/org/apache/hadoop/utils/db/RDBStore.java  |   4 +-
 .../org/apache/hadoop/utils/TestMetadataStore.java |  51 -
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   4 +
 .../hadoop/ozone/recon/ReconControllerModule.java  |   6 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  48 -
 .../ozone/recon/api/ContainerKeyService.java   |  77 +++-
 .../ozone/recon/api/types/ContainerKeyPrefix.java  |  41 +++-
 .../hadoop/ozone/recon/api/types/KeyMetadata.java  |  74 ---
 .../recon/recovery/ReconOmMetadataManagerImpl.java |   4 +-
 .../recon/spi/ContainerDBServiceProvider.java  |  13 +-
 .../recon/spi/OzoneManagerServiceProvider.java |   9 +-
 .../spi/impl/ContainerDBServiceProviderImpl.java   | 116 ++-
 .../recon/spi/impl/ContainerKeyPrefixCodec.java|  87 +
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  49 ++---
 .../spi/{ => impl}/ReconContainerDBProvider.java   |  62 +++---
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  | 107 ++
 .../package-info.java} |  21 +-
 .../ozone/recon/AbstractOMMetadataManagerTest.java | 172 
 .../apache/hadoop/ozone/recon/TestReconCodecs.java |  58 ++
 .../apache/hadoop/ozone/recon/TestReconUtils.java  |   4 +-
 .../ozone/recon/api/TestContainerKeyService.java   | 216 +
 .../hadoop/ozone/recon/api/package-info.java}  |  20 +-
 .../impl/TestContainerDBServiceProviderImpl.java   | 141 ++
 .../impl/TestOzoneManagerServiceProviderImpl.java  | 111 +--
 .../spi/impl/TestReconContainerDBProvider.java |  87 +
 .../recon/tasks/TestContainerKeyMapperTask.java| 194 ++
 .../hadoop/ozone/recon/tasks/package-info.java}|  21 +-
 32 files changed, 1416 insertions(+), 429 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
index 92051dd..cd07b64 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
@@ -62,8 +62,4 @@ public class LevelDBStoreIterator implements 
MetaStoreIterator {
 levelDBIterator.seekToLast();
   }
 
-  @Override
-  public void prefixSeek(byte[] prefix) {
-levelDBIterator.seek(prefix);
-  }
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
index 15ded0d..52d0a3e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
@@ -36,9 +36,4 @@ public interface MetaStoreIterator extends Iterator {
*/
   void seekToLast();
 
-  /**
-   * seek with prefix.
-   */
-  void prefixSeek(byte[] prefix);
-
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
index 161d5de..6e9b695 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java
@@ -63,9 +63,4 @@ public class RocksDBStoreIterator implements 
MetaStoreIterator {
 rocksDBIterator.seekToLast();
   }
 
-  @Override
-  public void prefixSeek(byte[] prefix) {
-rocksDBIterator.seek(prefix);
-  }
-
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
index d55daa2..0bc30d0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.utils.db;
 
+import java.io.File;
 

[hadoop] branch trunk updated: HDDS-1185. Optimize GetFileStatus in OzoneFileSystem by reducing the number of rpc call to OM. Contributed by Mukul Kumar Singh.

2019-03-25 Thread msingh
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 67dd45f  HDDS-1185. Optimize GetFileStatus in OzoneFileSystem by 
reducing the number of rpc call to OM. Contributed by Mukul Kumar Singh.
67dd45f is described below

commit 67dd45fc25c1efd53e7e9010f251bdf60a332a06
Author: Mukul Kumar Singh 
AuthorDate: Mon Mar 25 17:03:30 2019 +0530

HDDS-1185. Optimize GetFileStatus in OzoneFileSystem by reducing the number 
of rpc call to OM. Contributed by Mukul Kumar Singh.
---
 .../apache/hadoop/ozone/client/OzoneBucket.java|  6 +-
 .../ozone/client/protocol/ClientProtocol.java  | 12 +++
 .../hadoop/ozone/client/rest/RestClient.java   |  9 ++
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  7 ++
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  1 +
 .../org/apache/hadoop/ozone/audit/OMAction.java|  5 +-
 .../hadoop/ozone/om/exceptions/OMException.java|  4 +-
 .../hadoop/ozone/om/helpers/OzoneFileStatus.java   | 97 ++
 .../ozone/om/protocol/OzoneManagerProtocol.java| 12 +++
 ...OzoneManagerProtocolClientSideTranslatorPB.java | 32 +++
 .../src/main/proto/OzoneManagerProtocol.proto  | 17 
 .../org/apache/hadoop/ozone/om/KeyManager.java |  3 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 58 +
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java | 25 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 23 +
 .../apache/hadoop/ozone/om/fs/OzoneManagerFS.java  | 31 +++
 .../apache/hadoop/ozone/om/fs/package-info.java| 21 +
 .../protocolPB/OzoneManagerRequestHandler.java | 17 
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java | 10 +--
 .../hadoop/fs/ozone/OzoneClientAdapterImpl.java| 49 ---
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java| 64 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   | 46 +-
 22 files changed, 441 insertions(+), 108 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 735bc04..9a12ab7 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.WithMetadata;
 
 import java.io.IOException;
@@ -464,8 +465,9 @@ public class OzoneBucket extends WithMetadata {
   partNumberMarker, maxParts);
   }
 
-
-
+  public OzoneFileStatus getFileStatus(String keyName) throws IOException {
+return proxy.getOzoneFileStatus(volumeName, name, keyName);
+  }
 
   /**
* An Iterator to iterate over {@link OzoneKey} list.
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 99e85a8..5378c6a 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -39,6 +39,7 @@ import java.net.URI;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.KerberosInfo;
@@ -533,4 +534,15 @@ public interface ClientProtocol {
* @return Canonical Service Name of ozone delegation token.
*/
   String getCanonicalServiceName();
+
+  /**
+   * Get the Ozone File Status for a particular Ozone key.
+   * @param volumeName volume name.
+   * @param bucketName bucket name.
+   * @param keyName key name.
+   * @return OzoneFileStatus for the key.
+   * @throws IOException
+   */
+  OzoneFileStatus getOzoneFileStatus(String volumeName,
+  String bucketName, String keyName) throws IOException;
 }
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 48873a8..369b9fb 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -36,6 +36,7 @@ import 

[hadoop] branch trunk updated: HDDS-1317. KeyOutputStream#write throws ArrayIndexOutOfBoundsException when running RandomWrite MR examples. Contributed by Shashikant Banerjee.

2019-03-25 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d4e4a7d  HDDS-1317. KeyOutputStream#write throws 
ArrayIndexOutOfBoundsException when running RandomWrite MR examples. 
Contributed by Shashikant Banerjee.
d4e4a7d is described below

commit d4e4a7d4561b3d3644d7427b048d64966b4a0e61
Author: Shashikant Banerjee 
AuthorDate: Mon Mar 25 15:41:20 2019 +0530

HDDS-1317. KeyOutputStream#write throws ArrayIndexOutOfBoundsException when 
running RandomWrite MR examples. Contributed by Shashikant Banerjee.
---
 .../hadoop/hdds/scm/XceiverClientMetrics.java  |  20 +
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  90 +--
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 199 --
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |  16 +-
 .../org/apache/hadoop/hdds/client/BlockID.java |   4 +-
 .../ozone/client/io/BlockOutputStreamEntry.java|   6 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java|  59 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   7 +
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  12 +-
 .../ozone/client/rpc/TestBlockOutputStream.java| 690 +
 .../rpc/TestBlockOutputStreamWithFailures.java | 546 
 .../rpc/TestCloseContainerHandlingByClient.java|  11 +-
 .../ozone/container/ContainerTestHelper.java   |  17 +
 .../commandhandler/TestBlockDeletion.java  |   2 +-
 14 files changed, 1543 insertions(+), 136 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
index a430400..6c40921 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
@@ -37,7 +38,9 @@ public class XceiverClientMetrics {
   .getSimpleName();
 
   private @Metric MutableCounterLong pendingOps;
+  private @Metric MutableCounterLong totalOps;
   private MutableCounterLong[] pendingOpsArray;
+  private MutableCounterLong[] opsArray;
   private MutableRate[] containerOpsLatency;
   private MetricsRegistry registry;
 
@@ -46,12 +49,17 @@ public class XceiverClientMetrics {
 this.registry = new MetricsRegistry(SOURCE_NAME);
 
 this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
+this.opsArray = new MutableCounterLong[numEnumEntries];
 this.containerOpsLatency = new MutableRate[numEnumEntries];
 for (int i = 0; i < numEnumEntries; i++) {
   pendingOpsArray[i] = registry.newCounter(
   "numPending" + ContainerProtos.Type.forNumber(i + 1),
   "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops",
   (long) 0);
+  opsArray[i] = registry
+  .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1),
+  "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops",
+  (long) 0);
 
   containerOpsLatency[i] = registry.newRate(
   ContainerProtos.Type.forNumber(i + 1) + "Latency",
@@ -68,6 +76,8 @@ public class XceiverClientMetrics {
 
   public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
 pendingOps.incr();
+totalOps.incr();
+opsArray[type.ordinal()].incr();
 pendingOpsArray[type.ordinal()].incr();
   }
 
@@ -85,6 +95,16 @@ public class XceiverClientMetrics {
 return pendingOpsArray[type.ordinal()].value();
   }
 
+  @VisibleForTesting
+  public long getTotalOpCount() {
+return totalOps.value();
+  }
+
+  @VisibleForTesting
+  public long getContainerOpCountMetrics(ContainerProtos.Type type) {
+return opsArray[type.ordinal()].value();
+  }
+
   public void unRegister() {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 ms.unregisterSource(SOURCE_NAME);
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 65241bf..a2e65e2 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 

[hadoop] branch ozone-0.4 updated: HDDS-1317. KeyOutputStream#write throws ArrayIndexOutOfBoundsException when running RandomWrite MR examples. Contributed by Shashikant Banerjee.

2019-03-25 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new eed623a  HDDS-1317. KeyOutputStream#write throws 
ArrayIndexOutOfBoundsException when running RandomWrite MR examples. 
Contributed by Shashikant Banerjee.
eed623a is described below

commit eed623ad618d06784858f793d72ecc01126753ef
Author: Shashikant Banerjee 
AuthorDate: Mon Mar 25 15:41:20 2019 +0530

HDDS-1317. KeyOutputStream#write throws ArrayIndexOutOfBoundsException when 
running RandomWrite MR examples. Contributed by Shashikant Banerjee.
---
 .../hadoop/hdds/scm/XceiverClientMetrics.java  |  20 +
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  90 +--
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 199 --
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |  16 +-
 .../org/apache/hadoop/hdds/client/BlockID.java |   4 +-
 .../ozone/client/io/BlockOutputStreamEntry.java|   6 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java|  59 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   7 +
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  12 +-
 .../ozone/client/rpc/TestBlockOutputStream.java| 690 +
 .../rpc/TestBlockOutputStreamWithFailures.java | 546 
 .../rpc/TestCloseContainerHandlingByClient.java|  11 +-
 .../ozone/container/ContainerTestHelper.java   |  17 +
 .../commandhandler/TestBlockDeletion.java  |   2 +-
 14 files changed, 1543 insertions(+), 136 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
index a430400..6c40921 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
@@ -37,7 +38,9 @@ public class XceiverClientMetrics {
   .getSimpleName();
 
   private @Metric MutableCounterLong pendingOps;
+  private @Metric MutableCounterLong totalOps;
   private MutableCounterLong[] pendingOpsArray;
+  private MutableCounterLong[] opsArray;
   private MutableRate[] containerOpsLatency;
   private MetricsRegistry registry;
 
@@ -46,12 +49,17 @@ public class XceiverClientMetrics {
 this.registry = new MetricsRegistry(SOURCE_NAME);
 
 this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
+this.opsArray = new MutableCounterLong[numEnumEntries];
 this.containerOpsLatency = new MutableRate[numEnumEntries];
 for (int i = 0; i < numEnumEntries; i++) {
   pendingOpsArray[i] = registry.newCounter(
   "numPending" + ContainerProtos.Type.forNumber(i + 1),
   "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops",
   (long) 0);
+  opsArray[i] = registry
+  .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1),
+  "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops",
+  (long) 0);
 
   containerOpsLatency[i] = registry.newRate(
   ContainerProtos.Type.forNumber(i + 1) + "Latency",
@@ -68,6 +76,8 @@ public class XceiverClientMetrics {
 
   public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
 pendingOps.incr();
+totalOps.incr();
+opsArray[type.ordinal()].incr();
 pendingOpsArray[type.ordinal()].incr();
   }
 
@@ -85,6 +95,16 @@ public class XceiverClientMetrics {
 return pendingOpsArray[type.ordinal()].value();
   }
 
+  @VisibleForTesting
+  public long getTotalOpCount() {
+return totalOps.value();
+  }
+
+  @VisibleForTesting
+  public long getContainerOpCountMetrics(ContainerProtos.Type type) {
+return opsArray[type.ordinal()].value();
+  }
+
   public void unRegister() {
 MetricsSystem ms = DefaultMetricsSystem.instance();
 ms.unregisterSource(SOURCE_NAME);
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 65241bf..a2e65e2 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import