This is an automated email from the ASF dual-hosted git repository.

siddhant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 92bc617801 HDDS-10462. Fail Datanode Decommission Early (#6367)
92bc617801 is described below

commit 92bc617801e31c3ad8a75602e1167300c6936b33
Author: Tejaskriya <[email protected]>
AuthorDate: Tue Apr 2 13:47:12 2024 +0530

    HDDS-10462. Fail Datanode Decommission Early (#6367)
---
 .../apache/hadoop/hdds/scm/client/ScmClient.java   |   3 +-
 .../protocol/StorageContainerLocationProtocol.java |   2 +-
 ...inerLocationProtocolClientSideTranslatorPB.java |   5 +-
 .../src/main/proto/ScmAdminProtocol.proto          |   1 +
 .../hdds/scm/node/NodeDecommissionManager.java     |  78 ++++-
 ...inerLocationProtocolServerSideTranslatorPB.java |   2 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |   4 +-
 .../hdds/scm/server/StorageContainerManager.java   |   2 +-
 .../hdds/scm/node/TestNodeDecommissionManager.java | 342 +++++++++++++++++++--
 .../hdds/scm/cli/ContainerOperationClient.java     |   4 +-
 .../scm/cli/datanode/DecommissionSubCommand.java   |   7 +-
 .../cli/datanode/TestDecommissionSubCommand.java   |   7 +-
 .../cli/datanode/TestMaintenanceSubCommand.java    |   3 +-
 .../cli/datanode/TestRecommissionSubCommand.java   |   3 +-
 .../scm/node/TestDecommissionAndMaintenance.java   |   4 +-
 .../ozone/recon/TestReconAndAdminContainerCLI.java |   4 +-
 16 files changed, 428 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index c124e9a5c4..6a46741a06 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -217,11 +217,12 @@ public interface ScmClient extends Closeable {
    * Allows a list of hosts to be decommissioned. The hosts are identified
    * by their hostname and optionally port in the format foo.com:port.
    * @param hosts A list of hostnames, optionally with port
+   * @param force true to forcefully decommission Datanodes
    * @throws IOException
    * @return A list of DatanodeAdminError for any hosts which failed to
    *         decommission
    */
-  List<DatanodeAdminError> decommissionNodes(List<String> hosts)
+  List<DatanodeAdminError> decommissionNodes(List<String> hosts, boolean force)
       throws IOException;
 
   /**
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 1ebff65fc6..9083836631 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -245,7 +245,7 @@ public interface StorageContainerLocationProtocol extends 
Closeable {
 
   HddsProtos.Node queryNode(UUID uuid) throws IOException;
 
-  List<DatanodeAdminError> decommissionNodes(List<String> nodes)
+  List<DatanodeAdminError> decommissionNodes(List<String> nodes, boolean force)
       throws IOException;
 
   List<DatanodeAdminError> recommissionNodes(List<String> nodes)
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 76b15feada..b573ee0d04 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -528,15 +528,16 @@ public final class 
StorageContainerLocationProtocolClientSideTranslatorPB
   /**
    * Attempts to decommission the list of nodes.
    * @param nodes The list of hostnames or hostname:ports to decommission
+   * @param force true to skip fail-early checks and try to decommission nodes
    * @throws IOException
    */
   @Override
-  public List<DatanodeAdminError> decommissionNodes(List<String> nodes)
+  public List<DatanodeAdminError> decommissionNodes(List<String> nodes, 
boolean force)
       throws IOException {
     Preconditions.checkNotNull(nodes);
     DecommissionNodesRequestProto request =
         DecommissionNodesRequestProto.newBuilder()
-        .addAllHosts(nodes)
+        .addAllHosts(nodes).setForce(force)
         .build();
     DecommissionNodesResponseProto response =
         submitRequest(Type.DecommissionNodes,
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto 
b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index 0aba100817..eff9509937 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -362,6 +362,7 @@ message DatanodeUsageInfoResponseProto {
 */
 message DecommissionNodesRequestProto {
   repeated string hosts = 1;
+  optional bool force = 2;
 }
 
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index 4ace6d22d5..42a43ad589 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -21,10 +21,14 @@ import 
com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -42,6 +46,7 @@ import java.util.Comparator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -56,6 +61,7 @@ public class NodeDecommissionManager {
   private final DatanodeAdminMonitor monitor;
 
   private final NodeManager nodeManager;
+  private ContainerManager containerManager;
   private final SCMContext scmContext;
   private final boolean useHostnames;
 
@@ -252,10 +258,11 @@ public class NodeDecommissionManager {
     return false;
   }
 
-  public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm,
+  public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, 
ContainerManager cm,
              SCMContext scmContext,
              EventPublisher eventQueue, ReplicationManager rm) {
     this.nodeManager = nm;
+    this.containerManager = cm;
     this.scmContext = scmContext;
 
     executor = Executors.newScheduledThreadPool(1,
@@ -305,9 +312,21 @@ public class NodeDecommissionManager {
   }
 
   public synchronized List<DatanodeAdminError> decommissionNodes(
-      List<String> nodes) {
+      List<String> nodes, boolean force) {
     List<DatanodeAdminError> errors = new ArrayList<>();
     List<DatanodeDetails> dns = mapHostnamesToDatanodes(nodes, errors);
+    // add check for fail-early if force flag is not set
+    if (!force) {
+      LOG.info("Force flag = {}. Checking if decommission is possible for dns: 
{}", force, dns);
+      boolean decommissionPossible = checkIfDecommissionPossible(dns, errors);
+      if (!decommissionPossible) {
+        LOG.error("Cannot decommission nodes as sufficient node are not 
available.");
+        errors.add(new DatanodeAdminError("AllHosts", "Sufficient nodes are 
not available."));
+        return errors;
+      }
+    } else {
+      LOG.info("Force flag = {}. Skip checking if decommission is possible for 
dns: {}", force, dns);
+    }
     for (DatanodeDetails dn : dns) {
       try {
         startDecommission(dn);
@@ -368,6 +387,61 @@ public class NodeDecommissionManager {
     }
   }
 
+  private synchronized boolean 
checkIfDecommissionPossible(List<DatanodeDetails> dns, List<DatanodeAdminError> 
errors) {
+    int numDecom = dns.size();
+    List<DatanodeDetails> validDns = new ArrayList<>(dns);
+    int inServiceTotal = 
nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
+    for (DatanodeDetails dn : dns) {
+      try {
+        NodeStatus nodeStatus = getNodeStatus(dn);
+        NodeOperationalState opState = nodeStatus.getOperationalState();
+        if (opState != NodeOperationalState.IN_SERVICE) {
+          numDecom--;
+          validDns.remove(dn);
+        }
+      } catch (NodeNotFoundException ex) {
+        numDecom--;
+        validDns.remove(dn);
+      }
+    }
+
+    for (DatanodeDetails dn : validDns) {
+      Set<ContainerID> containers;
+      try {
+        containers = nodeManager.getContainers(dn);
+      } catch (NodeNotFoundException ex) {
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
+            "decommission it", dn.getHostName());
+        continue; // ignore the DN and continue to next one
+      }
+
+      for (ContainerID cid : containers) {
+        ContainerInfo cif;
+        try {
+          cif = containerManager.getContainer(cid);
+        } catch (ContainerNotFoundException ex) {
+          LOG.warn("Could not find container info for container {}.", cid);
+          continue; // ignore the container and continue to next one
+        }
+        synchronized (cif) {
+          if (cif.getState().equals(HddsProtos.LifeCycleState.DELETED) ||
+              cif.getState().equals(HddsProtos.LifeCycleState.DELETING)) {
+            continue;
+          }
+          int reqNodes = cif.getReplicationConfig().getRequiredNodes();
+          if ((inServiceTotal - numDecom) < reqNodes) {
+            LOG.info("Cannot decommission nodes. Tried to decommission {} 
nodes of which valid nodes = {}. " +
+                    "Cluster state: In-service nodes = {}, nodes required for 
replication = {}. " +
+                    "Failing due to datanode : {}, container : {}",
+                dns.size(), numDecom, inServiceTotal, reqNodes, dn, cid);
+            return false;
+          }
+        }
+      }
+    }
+    return true;
+  }
+
   public synchronized List<DatanodeAdminError> recommissionNodes(
       List<String> nodes) {
     List<DatanodeAdminError> errors = new ArrayList<>();
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 31f900c604..16a8cbd5a4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -1199,7 +1199,7 @@ public final class 
StorageContainerLocationProtocolServerSideTranslatorPB
   public DecommissionNodesResponseProto decommissionNodes(
       DecommissionNodesRequestProto request) throws IOException {
     List<DatanodeAdminError> errors =
-        impl.decommissionNodes(request.getHostsList());
+        impl.decommissionNodes(request.getHostsList(), request.getForce());
     DecommissionNodesResponseProto.Builder response =
         DecommissionNodesResponseProto.newBuilder();
     for (DatanodeAdminError e : errors) {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 731e47f44c..ecfb92104d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -646,11 +646,11 @@ public class SCMClientProtocolServer implements
   }
 
   @Override
-  public List<DatanodeAdminError> decommissionNodes(List<String> nodes)
+  public List<DatanodeAdminError> decommissionNodes(List<String> nodes, 
boolean force)
       throws IOException {
     try {
       getScm().checkAdminAccess(getRemoteUser(), false);
-      return scm.getScmDecommissionManager().decommissionNodes(nodes);
+      return scm.getScmDecommissionManager().decommissionNodes(nodes, force);
     } catch (Exception ex) {
       LOG.error("Failed to decommission nodes", ex);
       throw ex;
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 11fdc0d16d..c6b809874c 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -845,7 +845,7 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
           pipelineManager, eventQueue, serviceManager, scmContext);
     }
 
-    scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager,
+    scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, 
containerManager,
         scmContext, eventQueue, replicationManager);
 
     statefulServiceStateManager = StatefulServiceStateManagerImpl.newBuilder()
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 09f0dd59b9..a0c0280d40 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -18,20 +18,27 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
-import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.mockito.Mockito;
 
 import java.io.File;
 import java.io.IOException;
@@ -39,13 +46,21 @@ import java.util.List;
 import java.util.UUID;
 import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Set;
+import java.util.HashSet;
 
 import static java.util.Collections.singletonList;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 /**
  * Unit tests for the decommission manager.
@@ -56,15 +71,42 @@ public class TestNodeDecommissionManager {
   private NodeDecommissionManager decom;
   private StorageContainerManager scm;
   private NodeManager nodeManager;
+  private ContainerManager containerManager;
   private OzoneConfiguration conf;
+  private static int id = 1;
 
   @BeforeEach
   void setup(@TempDir File dir) throws Exception {
     conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath());
-    nodeManager = createNodeManager(conf);
-    decom = new NodeDecommissionManager(conf, nodeManager,
+    scm = HddsTestUtils.getScm(conf);
+    nodeManager = scm.getScmNodeManager();
+    containerManager = mock(ContainerManager.class);
+    decom = new NodeDecommissionManager(conf, nodeManager, containerManager,
         SCMContext.emptyContext(), new EventQueue(), null);
+    when(containerManager.allocateContainer(any(ReplicationConfig.class), 
anyString()))
+        .thenAnswer(invocation -> 
createMockContainer((ReplicationConfig)invocation.getArguments()[0],
+            (String) invocation.getArguments()[1]));
+  }
+
+  private ContainerInfo createMockContainer(ReplicationConfig rep, String 
owner) {
+    ContainerInfo.Builder builder = new ContainerInfo.Builder()
+        .setReplicationConfig(rep)
+        .setContainerID(id)
+        .setPipelineID(PipelineID.randomId())
+        .setState(OPEN)
+        .setOwner(owner);
+    id++;
+    return builder.build();
+  }
+  private ContainerInfo getMockContainer(ReplicationConfig rep, ContainerID 
conId) {
+    ContainerInfo.Builder builder = new ContainerInfo.Builder()
+        .setReplicationConfig(rep)
+        .setContainerID(conId.getId())
+        .setPipelineID(PipelineID.randomId())
+        .setState(OPEN)
+        .setOwner("admin");
+    return builder.build();
   }
 
   @Test
@@ -99,37 +141,37 @@ public class TestNodeDecommissionManager {
     // Try to decommission a host that does exist, but give incorrect port
     List<DatanodeAdminError> error =
         decom.decommissionNodes(
-            singletonList(dns.get(1).getIpAddress() + ":10"));
+            singletonList(dns.get(1).getIpAddress() + ":10"), false);
     assertEquals(1, error.size());
     assertThat(error.get(0).getHostname()).contains(dns.get(1).getIpAddress());
 
     // Try to decommission a host that does not exist
-    error = decom.decommissionNodes(singletonList("123.123.123.123"));
+    error = decom.decommissionNodes(singletonList("123.123.123.123"), false);
     assertEquals(1, error.size());
     assertThat(error.get(0).getHostname()).contains("123.123.123.123");
 
     // Try to decommission a host that does exist and a host that does not
     error  = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
-        "123,123,123,123"));
+        "123,123,123,123"), false);
     assertEquals(1, error.size());
     assertThat(error.get(0).getHostname()).contains("123,123,123,123");
 
     // Try to decommission a host with many DNs on the address with no port
-    error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress()));
+    error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress()), 
false);
     assertEquals(1, error.size());
     assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress());
 
     // Try to decommission a host with many DNs on the address with a port
     // that does not exist
     error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress()
-        + ":10"));
+        + ":10"), false);
     assertEquals(1, error.size());
     assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress() 
+ ":10");
 
     // Try to decommission 2 hosts with address that does not exist
     // Both should return error
     error  = decom.decommissionNodes(Arrays.asList(
-        "123.123.123.123", "234.234.234.234"));
+        "123.123.123.123", "234.234.234.234"), false);
     assertEquals(2, error.size());
     assertTrue(error.get(0).getHostname().contains("123.123.123.123") &&
         error.get(1).getHostname().contains("234.234.234.234"));
@@ -142,7 +184,7 @@ public class TestNodeDecommissionManager {
 
     // Decommission 2 valid nodes
     decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
-        dns.get(2).getIpAddress()));
+        dns.get(2).getIpAddress()), false);
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
@@ -151,14 +193,14 @@ public class TestNodeDecommissionManager {
     // Running the command again gives no error - nodes already decommissioning
     // are silently ignored.
     decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
-        dns.get(2).getIpAddress()));
+        dns.get(2).getIpAddress()), false);
 
     // Attempt to decommission dn(10) which has multiple hosts on the same IP
     // and we hardcoded ports to 3456, 4567, 5678
     DatanodeDetails multiDn = dns.get(10);
     String multiAddr =
         multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue();
-    decom.decommissionNodes(singletonList(multiAddr));
+    decom.decommissionNodes(singletonList(multiAddr), false);
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(multiDn).getOperationalState());
 
@@ -166,7 +208,7 @@ public class TestNodeDecommissionManager {
     // dn(11) with identical ports.
     nodeManager.processHeartbeat(dns.get(9));
     DatanodeDetails duplicatePorts = dns.get(9);
-    decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress()));
+    decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress()), 
false);
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(duplicatePorts).getOperationalState());
 
@@ -217,13 +259,13 @@ public class TestNodeDecommissionManager {
 
     // Attempt to decommission with just the IP, which should fail.
     List<DatanodeAdminError> error =
-        decom.decommissionNodes(singletonList(extraDN.getIpAddress()));
+        decom.decommissionNodes(singletonList(extraDN.getIpAddress()), false);
     assertEquals(1, error.size());
     assertThat(error.get(0).getHostname()).contains(extraDN.getIpAddress());
 
     // Now try the one with the unique port
     decom.decommissionNodes(
-        singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1));
+        singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1), false);
 
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(extraDN).getOperationalState());
@@ -239,7 +281,7 @@ public class TestNodeDecommissionManager {
     nodeManager.processHeartbeat(expectedDN);
 
     decom.decommissionNodes(singletonList(
-        expectedDN.getIpAddress() + ":" + ratisPort));
+        expectedDN.getIpAddress() + ":" + ratisPort), false);
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(expectedDN).getOperationalState());
     // The other duplicate is still in service
@@ -323,7 +365,7 @@ public class TestNodeDecommissionManager {
     // Try to go from maint to decom:
     List<String> dn = new ArrayList<>();
     dn.add(dns.get(1).getIpAddress());
-    List<DatanodeAdminError> errors = decom.decommissionNodes(dn);
+    List<DatanodeAdminError> errors = decom.decommissionNodes(dn, false);
     assertEquals(1, errors.size());
     assertEquals(dns.get(1).getHostName(), errors.get(0).getHostname());
 
@@ -369,10 +411,268 @@ public class TestNodeDecommissionManager {
     assertEquals(decom.getMonitor().getTrackedNodes().size(), 3);
   }
 
-  private SCMNodeManager createNodeManager(OzoneConfiguration config)
-      throws IOException, AuthenticationException {
-    scm = HddsTestUtils.getScm(config);
-    return (SCMNodeManager) scm.getScmNodeManager();
+  @Test
+  public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws
+      NodeNotFoundException, IOException {
+    when(containerManager.getContainer(any(ContainerID.class)))
+        .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig
+                .getInstance(HddsProtos.ReplicationFactor.THREE), 
(ContainerID)invocation.getArguments()[0]));
+    List<DatanodeAdminError> error;
+    List<DatanodeDetails> dns = new ArrayList<>();
+
+    for (int i = 0; i < 5; i++) {
+      DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+      dns.add(dn);
+      nodeManager.register(dn, null, null);
+    }
+
+    Set<ContainerID> idsRatis = new HashSet<>();
+    for (int i = 0; i < 5; i++) {
+      ContainerInfo container = containerManager.allocateContainer(
+          
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), 
"admin");
+      idsRatis.add(container.containerID());
+    }
+
+    for (DatanodeDetails dn  : nodeManager.getAllNodes().subList(0, 3)) {
+      nodeManager.setContainers(dn, idsRatis);
+    }
+
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+        dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), 
dns.get(4).getIpAddress()), false);
+    assertTrue(error.get(0).getHostname().contains("AllHosts"));
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(3)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(4)).getOperationalState());
+
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+        dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), 
dns.get(4).getIpAddress()), true);
+    assertEquals(0, error.size());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(3)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(4)).getOperationalState());
+  }
+
+  @Test
+  public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws
+      NodeNotFoundException, IOException {
+    when(containerManager.getContainer(any(ContainerID.class)))
+        .thenAnswer(invocation -> getMockContainer(new ECReplicationConfig(3, 
2),
+            (ContainerID)invocation.getArguments()[0]));
+    List<DatanodeAdminError> error;
+    List<DatanodeDetails> dns = new ArrayList<>();
+
+    for (int i = 0; i < 5; i++) {
+      DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+      dns.add(dn);
+      nodeManager.register(dn, null, null);
+    }
+
+    Set<ContainerID> idsEC = new HashSet<>();
+    for (int i = 0; i < 5; i++) {
+      ContainerInfo container = containerManager.allocateContainer(new 
ECReplicationConfig(3, 2), "admin");
+      idsEC.add(container.containerID());
+    }
+
+    for (DatanodeDetails dn  : nodeManager.getAllNodes()) {
+      nodeManager.setContainers(dn, idsEC);
+    }
+
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), 
false);
+    assertTrue(error.get(0).getHostname().contains("AllHosts"));
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), 
true);
+    assertEquals(0, error.size());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+  }
+
+  @Test
+  public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() 
throws
+      NodeNotFoundException, IOException {
+    List<DatanodeAdminError> error;
+    List<DatanodeDetails> dns = new ArrayList<>();
+
+    for (int i = 0; i < 5; i++) {
+      DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+      dns.add(dn);
+      nodeManager.register(dn, null, null);
+    }
+
+    Set<ContainerID> idsRatis = new HashSet<>();
+    ContainerInfo containerRatis = containerManager.allocateContainer(
+        
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), 
"admin");
+    idsRatis.add(containerRatis.containerID());
+    Set<ContainerID> idsEC = new HashSet<>();
+    ContainerInfo containerEC = containerManager.allocateContainer(new 
ECReplicationConfig(3, 2), "admin");
+    idsEC.add(containerEC.containerID());
+
+    when(containerManager.getContainer(any(ContainerID.class)))
+        .thenAnswer(invocation -> {
+          ContainerID containerID = (ContainerID)invocation.getArguments()[0];
+          if (idsEC.contains(containerID)) {
+            return getMockContainer(new ECReplicationConfig(3, 2),
+                (ContainerID)invocation.getArguments()[0]);
+          }
+          return 
getMockContainer(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+              (ContainerID)invocation.getArguments()[0]);
+        });
+
+    for (DatanodeDetails dn  : nodeManager.getAllNodes().subList(0, 3)) {
+      nodeManager.setContainers(dn, idsRatis);
+    }
+    for (DatanodeDetails dn  : nodeManager.getAllNodes()) {
+      nodeManager.setContainers(dn, idsEC);
+    }
+
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), 
false);
+    assertTrue(error.get(0).getHostname().contains("AllHosts"));
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), 
true);
+    assertEquals(0, error.size());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+  }
+
+  /**
+   * Verifies that a node already in DECOMMISSIONING state does not cause a
+   * repeated decommission request to fail: after one node is successfully
+   * decommissioned, asking for that node again together with an IN_SERVICE
+   * node must return no errors and leave both nodes DECOMMISSIONING.
+   */
+  @Test
+  public void testInsufficientNodeDecommissionChecksNotInService() throws
+      NodeNotFoundException, IOException {
+    // Every container lookup reports a Ratis/THREE container, so each
+    // container needs 3 replicas to remain fully replicated.
+    when(containerManager.getContainer(any(ContainerID.class)))
+        .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig
+            .getInstance(HddsProtos.ReplicationFactor.THREE), 
(ContainerID)invocation.getArguments()[0]));
+
+    List<DatanodeAdminError> error;
+    List<DatanodeDetails> dns = new ArrayList<>();
+
+    // Register 5 healthy datanodes with the (real) node manager.
+    for (int i = 0; i < 5; i++) {
+      DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+      dns.add(dn);
+      nodeManager.register(dn, null, null);
+    }
+
+    Set<ContainerID> idsRatis = new HashSet<>();
+    for (int i = 0; i < 5; i++) {
+      ContainerInfo container = containerManager.allocateContainer(
+          
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), 
"admin");
+      idsRatis.add(container.containerID());
+    }
+
+    // Only the first 3 nodes hold replicas of the Ratis containers.
+    for (DatanodeDetails dn  : nodeManager.getAllNodes().subList(0, 3)) {
+      nodeManager.setContainers(dn, idsRatis);
+    }
+
+    // decommission one node successfully
+    error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress()), 
false);
+    assertEquals(0, error.size());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(0)).getOperationalState());
+    // try to decommission 2 nodes, one in service and one in decommissioning 
state, should be successful.
+    error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(),
+        dns.get(1).getIpAddress()), false);
+    assertEquals(0, error.size());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(0)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+  }
+
+  /**
+   * Verifies how the insufficient-node check behaves when looking up a node's
+   * status raises NodeNotFoundException (NNF). The getNodeOpState helper used
+   * to stub getNodeStatus throws NNF for dns.get(0) and reports every other
+   * tracked node as HEALTHY with its persisted op state.
+   * Expectations:
+   * 1) Decommissioning 3 healthy nodes out of 5 would leave fewer than the 3
+   *    in-service nodes required by Ratis/THREE containers, so the whole
+   *    request is rejected with an "AllHosts" error and no state changes.
+   * 2) When one of the 3 requested nodes hits NNF, that node fails
+   *    individually (error is not "AllHosts") while the remaining 2 nodes
+   *    proceed to DECOMMISSIONING.
+   */
+  @Test
+  public void testInsufficientNodeDecommissionChecksForNNF() throws
+      NodeNotFoundException, IOException {
+    List<DatanodeAdminError> error;
+    List<DatanodeDetails> dns = new ArrayList<>();
+
+    // These nodes are tracked only in the local list; the node manager below
+    // is a pure mock, so nothing is registered with it.
+    for (int i = 0; i < 5; i++) {
+      DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+      dns.add(dn);
+    }
+    Set<ContainerID> idsRatis = new HashSet<>();
+    for (int i = 0; i < 3; i++) {
+      ContainerInfo container = containerManager.allocateContainer(
+          
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), 
"admin");
+      idsRatis.add(container.containerID());
+    }
+
+    // Replace the shared node manager with a mock and rebuild the
+    // decommission manager around it so every node-level call is stubbed.
+    nodeManager = mock(NodeManager.class);
+    decom = new NodeDecommissionManager(conf, nodeManager, containerManager,
+        SCMContext.emptyContext(), new EventQueue(), null);
+    when(containerManager.getContainer(any(ContainerID.class)))
+        .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig
+            .getInstance(HddsProtos.ReplicationFactor.THREE), 
(ContainerID)invocation.getArguments()[0]));
+    when(nodeManager.getNodesByAddress(any())).thenAnswer(invocation ->
+        getDatanodeDetailsList((String)invocation.getArguments()[0], dns));
+    when(nodeManager.getContainers(any())).thenReturn(idsRatis);
+    when(nodeManager.getNodeCount(any())).thenReturn(5);
+
+    // getNodeStatus throws NodeNotFoundException for dns.get(0); op-state
+    // writes are mirrored into the local dns list via setNodeOpState.
+    when(nodeManager.getNodeStatus(any())).thenAnswer(invocation ->
+        getNodeOpState((DatanodeDetails) invocation.getArguments()[0], dns));
+    Mockito.doAnswer(invocation -> {
+      setNodeOpState((DatanodeDetails)invocation.getArguments()[0],
+          (HddsProtos.NodeOperationalState)invocation.getArguments()[1], dns);
+      return null;
+    }).when(nodeManager).setNodeOperationalState(any(DatanodeDetails.class), 
any(
+        HddsProtos.NodeOperationalState.class));
+
+    // All 3 requested nodes are healthy: rejected as a group ("AllHosts").
+    error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+        dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), false);
+    assertTrue(error.get(0).getHostname().contains("AllHosts"));
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+        nodeManager.getNodeStatus(dns.get(3)).getOperationalState());
+
+    // dns.get(0) triggers NNF and fails alone; the other 2 nodes proceed.
+    error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(),
+        dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), false);
+    assertFalse(error.get(0).getHostname().contains("AllHosts"));
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+  }
+
+  /**
+   * Returns a list holding at most the first datanode in {@code dns} whose
+   * IP address equals {@code ipaddress}; empty list when nothing matches.
+   * Used to stub NodeManager#getNodesByAddress.
+   */
+  private List<DatanodeDetails> getDatanodeDetailsList(String ipaddress,
+      List<DatanodeDetails> dns) {
+    List<DatanodeDetails> matches = new ArrayList<>();
+    dns.stream()
+        .filter(node -> node.getIpAddress().equals(ipaddress))
+        .findFirst()
+        .ifPresent(matches::add);
+    return matches;
+  }
+
+  /**
+   * Records {@code newState} as the persisted operational state on the entry
+   * of {@code dns} equal to {@code dn} (first match only; no-op when absent).
+   * Used to stub NodeManager#setNodeOperationalState.
+   */
+  private void setNodeOpState(DatanodeDetails dn,
+      HddsProtos.NodeOperationalState newState, List<DatanodeDetails> dns) {
+    dns.stream()
+        .filter(dn::equals)
+        .findFirst()
+        .ifPresent(node -> node.setPersistedOpState(newState));
+  }
+
+  /**
+   * Stub for NodeManager#getNodeStatus: the first datanode in {@code dns}
+   * always triggers NodeNotFoundException; any other tracked datanode is
+   * reported HEALTHY with its persisted operational state; a datanode not in
+   * {@code dns} yields null.
+   */
+  private NodeStatus getNodeOpState(DatanodeDetails dn,
+      List<DatanodeDetails> dns) throws NodeNotFoundException {
+    if (dns.get(0).equals(dn)) {
+      throw new NodeNotFoundException();
+    }
+    return dns.stream()
+        .filter(dn::equals)
+        .findFirst()
+        .map(node -> new NodeStatus(node.getPersistedOpState(),
+            HddsProtos.NodeState.HEALTHY))
+        .orElse(null);
   }
 
   /**
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 0c9127e85f..d51479d44b 100644
--- 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -237,9 +237,9 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   @Override
-  public List<DatanodeAdminError> decommissionNodes(List<String> hosts)
+  public List<DatanodeAdminError> decommissionNodes(List<String> hosts, 
boolean force)
       throws IOException {
-    return storageContainerLocationClient.decommissionNodes(hosts);
+    return storageContainerLocationClient.decommissionNodes(hosts, force);
   }
 
   @Override
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
index e7d3a44438..31123ae81b 100644
--- 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
@@ -48,6 +48,11 @@ public class DecommissionSubCommand extends ScmSubcommand {
           paramLabel = "<host name>")
   private List<String> parameters = new ArrayList<>();
 
+  @CommandLine.Option(names = { "--force" },
+      defaultValue = "false",
+      description = "Forcefully try to decommission the datanode(s)")
+  private boolean force;
+
   @Override
   public void execute(ScmClient scmClient) throws IOException {
     if (parameters.size() > 0) {
@@ -62,7 +67,7 @@ public class DecommissionSubCommand extends ScmSubcommand {
       } else {
         hosts = parameters;
       }
-      List<DatanodeAdminError> errors = scmClient.decommissionNodes(hosts);
+      List<DatanodeAdminError> errors = scmClient.decommissionNodes(hosts, 
force);
       System.out.println("Started decommissioning datanode(s):\n" +
           String.join("\n", hosts));
       if (errors.size() > 0) {
diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
index e7e01ffaa1..d6f0f8ae82 100644
--- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
+++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
@@ -37,6 +37,7 @@ import picocli.CommandLine;
 
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.anyList;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -71,7 +72,7 @@ public class TestDecommissionSubCommand {
 
   @Test
   public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
-    when(scmClient.decommissionNodes(anyList()))
+    when(scmClient.decommissionNodes(anyList(), anyBoolean()))
             .thenAnswer(invocation -> new ArrayList<DatanodeAdminError>());
 
     String input = "host1\nhost2\nhost3\n";
@@ -100,7 +101,7 @@ public class TestDecommissionSubCommand {
 
   @Test
   public void testNoErrorsWhenDecommissioning() throws IOException  {
-    when(scmClient.decommissionNodes(anyList()))
+    when(scmClient.decommissionNodes(anyList(), anyBoolean()))
         .thenAnswer(invocation -> new ArrayList<DatanodeAdminError>());
 
     CommandLine c = new CommandLine(cmd);
@@ -123,7 +124,7 @@ public class TestDecommissionSubCommand {
 
   @Test
   public void testErrorsReportedWhenDecommissioning() throws IOException  {
-    when(scmClient.decommissionNodes(anyList()))
+    when(scmClient.decommissionNodes(anyList(), anyBoolean()))
         .thenAnswer(invocation -> {
           ArrayList<DatanodeAdminError> e = new ArrayList<>();
           e.add(new DatanodeAdminError("host1", "host1 error"));
diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
index d2a4c54b8b..a6225d1b5d 100644
--- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
+++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
@@ -37,6 +37,7 @@ import picocli.CommandLine;
 
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.anyInt;
 import static org.mockito.Mockito.anyList;
 import static org.mockito.Mockito.mock;
@@ -72,7 +73,7 @@ public class TestMaintenanceSubCommand {
 
   @Test
   public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
-    when(scmClient.decommissionNodes(anyList()))
+    when(scmClient.decommissionNodes(anyList(), anyBoolean()))
             .thenAnswer(invocation -> new ArrayList<DatanodeAdminError>());
 
     String input = "host1\nhost2\nhost3\n";
diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
index e274cd4fd5..083ada8a42 100644
--- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
+++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
@@ -37,6 +37,7 @@ import picocli.CommandLine;
 
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.anyList;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -71,7 +72,7 @@ public class TestRecommissionSubCommand {
 
   @Test
   public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
-    when(scmClient.decommissionNodes(anyList()))
+    when(scmClient.decommissionNodes(anyList(), anyBoolean()))
             .thenAnswer(invocation -> new ArrayList<DatanodeAdminError>());
 
     String input = "host1\nhost2\nhost3\n";
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
index 3f7267b4fd..fb4cb3ba4c 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
@@ -211,7 +211,7 @@ public class TestDecommissionAndMaintenance {
     final DatanodeDetails toDecommission = nm.getNodeByUuid(dnID.toString());
 
     scmClient.decommissionNodes(Arrays.asList(
-        getDNHostAndPort(toDecommission)));
+        getDNHostAndPort(toDecommission)), false);
 
     waitForDnToReachOpState(nm, toDecommission, DECOMMISSIONED);
     // Ensure one node transitioned to DECOMMISSIONING
@@ -265,7 +265,7 @@ public class TestDecommissionAndMaintenance {
         waitForAndReturnContainer(ratisRepConfig, 3);
     final DatanodeDetails dn
         = getOneDNHostingReplica(getContainerReplicas(container));
-    scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn)));
+    scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn)), false);
 
     // Wait for the state to be persisted on the DN so it can report it on
     // restart of SCM.
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
index 7691704d92..9fcb82fd4b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
@@ -246,7 +246,7 @@ class TestReconAndAdminContainerCLI {
           TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), 0);
     } else {
       scmClient.decommissionNodes(Collections.singletonList(
-          TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)));
+          TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), false);
     }
 
     TestNodeUtil.waitForDnToReachOpState(scmNodeManager,
@@ -273,7 +273,7 @@ class TestReconAndAdminContainerCLI {
           TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), 0);
     } else {
       scmClient.decommissionNodes(Collections.singletonList(
-          TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)));
+          TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), false);
     }
 
     TestNodeUtil.waitForDnToReachOpState(scmNodeManager,


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to