This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 10811c597f HDDS-7666. EC: Unrecoverable EC containers with some 
remaining replicas may block decommissioning (#4118)
10811c597f is described below

commit 10811c597fb724060b6577d10a70bd638cfcd69c
Author: Stephen O'Donnell <[email protected]>
AuthorDate: Thu Jan 5 12:39:35 2023 +0000

    HDDS-7666. EC: Unrecoverable EC containers with some remaining replicas may 
block decommissioning (#4118)
---
 .../replication/ContainerHealthResult.java         | 22 ++++++
 .../replication/ContainerReplicaCount.java         |  3 +
 .../replication/ECContainerReplicaCount.java       | 45 ++++++++++++
 .../replication/ECUnderReplicationHandler.java     |  5 --
 .../replication/RatisContainerReplicaCount.java    | 12 ++++
 .../health/ECReplicationCheckHandler.java          | 24 ++++---
 .../hdds/scm/node/DatanodeAdminMonitorImpl.java    |  2 +-
 .../replication/TestECContainerReplicaCount.java   | 42 ++++++++++-
 .../replication/TestECUnderReplicationHandler.java | 52 +++++++++++++-
 .../health/TestECReplicationCheckHandler.java      | 84 ++++++++++++++++++++++
 .../scm/node/DatanodeAdminMonitorTestUtil.java     | 78 ++++++++++++++++++--
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    | 60 ++++++++++++++++
 12 files changed, 408 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
index 6393f0349d..d5b92935ce 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java
@@ -108,6 +108,7 @@ public class ContainerHealthResult {
     private final boolean dueToDecommission;
     private final boolean sufficientlyReplicatedAfterPending;
     private final boolean unrecoverable;
+    private boolean hasUnReplicatedOfflineIndexes = false;
     private int requeueCount = 0;
 
     public UnderReplicatedHealthResult(ContainerInfo containerInfo,
@@ -206,6 +207,27 @@ public class ContainerHealthResult {
     public boolean isUnrecoverable() {
       return unrecoverable;
     }
+
+    /**
+     * Pass true if a container has some indexes which are only on nodes
+     * which are DECOMMISSIONING or ENTERING_MAINTENANCE. These containers may
+     * need to be processed even if they are unrecoverable.
+     * @param val pass true if the container has indexes on nodes going offline
+     *            or false otherwise.
+     */
+    public void setHasUnReplicatedOfflineIndexes(boolean val) {
+      hasUnReplicatedOfflineIndexes = val;
+    }
+    /**
+     * Indicates whether a container has some indexes which are only on nodes
+     * which are DECOMMISSIONING or ENTERING_MAINTENANCE. These containers may
+     * need to be processed even if they are unrecoverable.
+     * @return True if the container has some decommission or maintenance only
+     *         indexes.
+     */
+    public boolean hasUnreplicatedOfflineIndexes() {
+      return hasUnReplicatedOfflineIndexes;
+    }
   }
 
   /**
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
index a3160208e3..06fed7438f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.container.replication;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -36,6 +37,8 @@ public interface ContainerReplicaCount {
 
   boolean isSufficientlyReplicated();
 
+  boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode);
+
   boolean isOverReplicated();
 
   int getDecommissionCount();
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
index 1360c84016..57539a98ff 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.container.replication;
 
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -36,6 +37,7 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalSt
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 
 /**
  * This class provides a set of methods to test for over / under replication of
@@ -421,6 +423,49 @@ public class ECContainerReplicaCount implements 
ContainerReplicaCount {
         >= repConfig.getData() + remainingMaintenanceRedundancy;
   }
 
+  /**
+   * If we are checking a container for sufficient replication for "offline",
+   * ie decommission or maintenance, then it is not really a requirement that
+   * all replicas for the container are present. Instead, we can ensure the
+   * replica on the node going offline has a copy elsewhere on another
+   * IN_SERVICE node, and if so that replica is sufficiently replicated.
+   * @param datanode The datanode being checked to go offline.
+   * @return True if the container is sufficiently replicated or if this 
replica
+   *         on the passed node is present elsewhere on an IN_SERVICE node.
+   */
+  @Override
+  public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode) {
+    boolean sufficientlyReplicated = isSufficientlyReplicated(false);
+    if (sufficientlyReplicated) {
+      return true;
+    }
+    // If it is not sufficiently replicated (ie the container is missing some
+    // replicas) then we need to check if the replica that is on this node is
+    // available on another IN_SERVICE node, ie in the healthy set. This
+    // means we avoid blocking decommission or maintenance caused by
+    // un-recoverable EC containers.
+    if (datanode.getPersistedOpState() == IN_SERVICE) {
+      // The node passed into this method must be a node going offline, so it
+      // cannot be IN_SERVICE. If an IN_SERVICE node is passed, just return
+      // false.
+      return false;
+    }
+    ContainerReplica thisReplica = null;
+    for (ContainerReplica r : replicas) {
+      if (r.getDatanodeDetails().equals(datanode)) {
+        thisReplica = r;
+        break;
+      }
+    }
+    if (thisReplica == null) {
+      // From the set of replicas, none are on the passed datanode.
+      // This should not happen in practice but if it does we cannot indicate
+      // the container is sufficiently replicated.
+      return false;
+    }
+    return healthyIndexes.containsKey(thisReplica.getReplicaIndex());
+  }
+
   @Override
   public boolean isSufficientlyReplicated() {
     return isSufficientlyReplicated(false);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
index 017e93ee2a..a1e7f1a73f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
@@ -136,11 +136,6 @@ public class ECUnderReplicationHandler implements 
UnhealthyReplicationHandler {
           container.getContainerID(), replicaCount.getReplicas());
       return emptyMap();
     }
-    if (replicaCount.isUnrecoverable()) {
-      LOG.warn("The container {} is unrecoverable. The available replicas" +
-          " are: {}.", container.containerID(), replicaCount.getReplicas());
-      return emptyMap();
-    }
 
     // don't place reconstructed replicas on exclude nodes, since they already
     // have replicas
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
index 577fc6004d..1363ba992a 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.container.replication;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -247,6 +248,17 @@ public class RatisContainerReplicaCount implements 
ContainerReplicaCount {
     return isSufficientlyReplicated(false);
   }
 
+  /**
+   * For Ratis, this method is the same as isSufficientlyReplicated.
+   * @param datanode Not used in this implementation
+   * @return True if the container is sufficiently replicated and False
+   *         otherwise.
+   */
+  @Override
+  public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode) {
+    return isSufficientlyReplicated();
+  }
+
   /**
    * Return true if the container is sufficiently replicated. Decommissioning
    * and Decommissioned containers are ignored in this check, assuming they 
will
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
index cdd2f565ea..af12875219 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
@@ -85,13 +85,15 @@ public class ECReplicationCheckHandler extends 
AbstractCheck {
             ReplicationManagerReport.HealthState.MISSING, containerID);
       }
       if (!underHealth.isReplicatedOkAfterPending() &&
-          !underHealth.isUnrecoverable()) {
+          (!underHealth.isUnrecoverable()
+              || underHealth.hasUnreplicatedOfflineIndexes())) {
         request.getReplicationQueue().enqueue(underHealth);
       }
       LOG.debug("Container {} is Under Replicated. isReplicatedOkAfterPending "
-          + "is [{}]. isUnrecoverable is [{}]", container,
-          underHealth.isReplicatedOkAfterPending(),
-          underHealth.isUnrecoverable());
+          + "is [{}]. isUnrecoverable is [{}]. hasUnreplicatedOfflineIndexes "
+          + "is [{}]", container, underHealth.isReplicatedOkAfterPending(),
+          underHealth.isUnrecoverable(),
+          underHealth.hasUnreplicatedOfflineIndexes());
       return true;
     } else if (health.getHealthState()
         == ContainerHealthResult.HealthState.OVER_REPLICATED) {
@@ -149,10 +151,16 @@ public class ECReplicationCheckHandler extends 
AbstractCheck {
         dueToDecommission = false;
         remainingRedundancy = repConfig.getParity() - missingIndexes.size();
       }
-      return new ContainerHealthResult.UnderReplicatedHealthResult(
-          container, remainingRedundancy, dueToDecommission,
-          replicaCount.isSufficientlyReplicated(true),
-          replicaCount.isUnrecoverable());
+      ContainerHealthResult.UnderReplicatedHealthResult result =
+          new ContainerHealthResult.UnderReplicatedHealthResult(
+              container, remainingRedundancy, dueToDecommission,
+              replicaCount.isSufficientlyReplicated(true),
+              replicaCount.isUnrecoverable());
+      if (replicaCount.decommissioningOnlyIndexes(true).size() > 0
+          || replicaCount.maintenanceOnlyIndexes(true).size() > 0) {
+        result.setHasUnReplicatedOfflineIndexes(true);
+      }
+      return result;
     }
 
     if (replicaCount.isOverReplicated(false)) {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index 4c446acce7..0bd644a0cb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -345,7 +345,7 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
       try {
         ContainerReplicaCount replicaSet =
             replicationManager.getContainerReplicaCount(cid);
-        if (replicaSet.isSufficientlyReplicated()) {
+        if (replicaSet.isSufficientlyReplicatedForOffline(dn)) {
           sufficientlyReplicated++;
         } else {
           if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
index cf844f2845..4cff98ae53 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
@@ -42,6 +42,7 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalSt
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
 import static 
org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil.createContainerReplica;
 
 /**
@@ -107,7 +108,7 @@ public class TestECContainerReplicaCount {
   public void testUnderReplicationDueToUnhealthyReplica() {
     Set<ContainerReplica> replicas =
         ReplicationTestUtil.createReplicas(container.containerID(),
-            ContainerReplicaProto.State.CLOSED, 1, 2, 3, 4);
+            CLOSED, 1, 2, 3, 4);
     ContainerReplica unhealthyIndex5 =
         createContainerReplica(container.containerID(), 5,
             IN_SERVICE, ContainerReplicaProto.State.UNHEALTHY);
@@ -572,4 +573,43 @@ public class TestECContainerReplicaCount {
     Assertions
         .assertEquals(ImmutableSet.of(), 
rcnt.decommissioningOnlyIndexes(true));
   }
+
+  @Test
+  public void testSufficientlyReplicatedForOffline() {
+    Set<ContainerReplica> replica = ReplicationTestUtil
+        .createReplicas(Pair.of(IN_SERVICE, 2));
+
+    ContainerReplica inServiceReplica =
+        ReplicationTestUtil.createContainerReplica(container.containerID(),
+            1, IN_SERVICE, CLOSED);
+    replica.add(inServiceReplica);
+
+    ContainerReplica offlineReplica =
+        ReplicationTestUtil.createContainerReplica(container.containerID(),
+            1, DECOMMISSIONING, CLOSED);
+    replica.add(offlineReplica);
+
+    ContainerReplica offlineNotReplicated =
+        ReplicationTestUtil.createContainerReplica(container.containerID(),
+            3, DECOMMISSIONING, CLOSED);
+    replica.add(offlineNotReplicated);
+
+    ECContainerReplicaCount rcnt =
+        new ECContainerReplicaCount(container, replica, 
Collections.emptyList(),
+            1);
+    Assertions.assertFalse(rcnt.isSufficientlyReplicated(false));
+    Assertions.assertTrue(rcnt.isSufficientlyReplicatedForOffline(
+        offlineReplica.getDatanodeDetails()));
+    Assertions.assertFalse(rcnt.isSufficientlyReplicatedForOffline(
+        offlineNotReplicated.getDatanodeDetails()));
+
+    // A random DN not hosting a replica for this container should return 
false.
+    Assertions.assertFalse(rcnt.isSufficientlyReplicatedForOffline(
+        MockDatanodeDetails.randomDatanodeDetails()));
+
+    // Passing the IN_SERVICE node should return false even though the
+    // replica is on a healthy node
+    Assertions.assertFalse(rcnt.isSufficientlyReplicatedForOffline(
+        inServiceReplica.getDatanodeDetails()));
+  }
 }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
index f2cac1f2a1..58989daa48 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
@@ -85,6 +85,7 @@ public class TestECUnderReplicationHandler {
   private static final int DATA = 3;
   private static final int PARITY = 2;
   private PlacementPolicy ecPlacementPolicy;
+  private int remainingMaintenanceRedundancy = 1;
 
   @BeforeEach
   public void setup() {
@@ -564,6 +565,56 @@ public class TestECUnderReplicationHandler {
     Assertions.assertFalse(commands.containsKey(dn));
   }
 
+  @Test
+  public void testDecommissioningIndexCopiedWhenContainerUnRecoverable()
+      throws IOException {
+    Set<ContainerReplica> availableReplicas = ReplicationTestUtil
+        .createReplicas(Pair.of(IN_SERVICE, 1));
+    ContainerReplica decomReplica = ReplicationTestUtil.createContainerReplica(
+        container.containerID(), 2, DECOMMISSIONING, CLOSED);
+    availableReplicas.add(decomReplica);
+    Map<DatanodeDetails, SCMCommand<?>> cmds =
+        testUnderReplicationWithMissingIndexes(Collections.emptyList(),
+            availableReplicas, 1, 0, policy);
+    Assertions.assertEquals(1, cmds.size());
+    ReplicateContainerCommand cmd =
+        (ReplicateContainerCommand) cmds.values().iterator().next();
+
+    List<DatanodeDetails> sources = cmd.getSourceDatanodes();
+    Assertions.assertEquals(1, sources.size());
+    Assertions.assertEquals(decomReplica.getDatanodeDetails(),
+        cmd.getSourceDatanodes().get(0));
+  }
+
+  @Test
+  public void testMaintenanceIndexCopiedWhenContainerUnRecoverable()
+      throws IOException {
+    Set<ContainerReplica> availableReplicas = ReplicationTestUtil
+        .createReplicas(Pair.of(IN_SERVICE, 1));
+    ContainerReplica maintReplica = ReplicationTestUtil.createContainerReplica(
+        container.containerID(), 2, ENTERING_MAINTENANCE, CLOSED);
+    availableReplicas.add(maintReplica);
+
+    Map<DatanodeDetails, SCMCommand<?>> cmds =
+        testUnderReplicationWithMissingIndexes(Collections.emptyList(),
+            availableReplicas, 0, 1, policy);
+    Assertions.assertEquals(0, cmds.size());
+
+    // Change the remaining redundancy to ensure something needs to be copied.
+    remainingMaintenanceRedundancy = 2;
+    cmds = testUnderReplicationWithMissingIndexes(Collections.emptyList(),
+        availableReplicas, 0, 1, policy);
+
+    Assertions.assertEquals(1, cmds.size());
+    ReplicateContainerCommand cmd =
+        (ReplicateContainerCommand) cmds.values().iterator().next();
+
+    List<DatanodeDetails> sources = cmd.getSourceDatanodes();
+    Assertions.assertEquals(1, sources.size());
+    Assertions.assertEquals(maintReplica.getDatanodeDetails(),
+        cmd.getSourceDatanodes().get(0));
+  }
+
   public Map<DatanodeDetails, SCMCommand<?>>
       testUnderReplicationWithMissingIndexes(
       List<Integer> missingIndexes, Set<ContainerReplica> availableReplicas,
@@ -577,7 +628,6 @@ public class TestECUnderReplicationHandler {
     Mockito.when(result.isUnrecoverable()).thenReturn(false);
     Mockito.when(result.getContainerInfo()).thenReturn(container);
 
-    int remainingMaintenanceRedundancy = 1;
     Map<DatanodeDetails, SCMCommand<?>> datanodeDetailsSCMCommandMap = ecURH
         .processAndCreateCommands(availableReplicas, ImmutableList.of(),
             result, remainingMaintenanceRedundancy);
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestECReplicationCheckHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestECReplicationCheckHandler.java
index 8b7aa665c8..8513eecee5 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestECReplicationCheckHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestECReplicationCheckHandler.java
@@ -20,6 +20,7 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -48,6 +49,7 @@ import java.util.Set;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 import static 
org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaOp.PendingOpType.ADD;
@@ -274,6 +276,88 @@ public class TestECReplicationCheckHandler {
         ReplicationManagerReport.HealthState.MISSING));
   }
 
+  @Test
+  public void testUnderReplicatedAndUnrecoverableWithDecommission() {
+    testUnderReplicatedAndUnrecoverableWithOffline(DECOMMISSIONING);
+  }
+
+  @Test
+  public void testUnderReplicatedAndUnrecoverableWithMaintenance() {
+    testUnderReplicatedAndUnrecoverableWithOffline(ENTERING_MAINTENANCE);
+  }
+
+  private void testUnderReplicatedAndUnrecoverableWithOffline(
+      HddsProtos.NodeOperationalState offlineState) {
+    ContainerInfo container = createContainerInfo(repConfig);
+    Set<ContainerReplica> replicas = createReplicas(container.containerID(),
+        Pair.of(IN_SERVICE, 1), Pair.of(offlineState, 2));
+    ContainerCheckRequest request = requestBuilder
+        .setContainerReplicas(replicas)
+        .setContainerInfo(container)
+        .build();
+
+    UnderReplicatedHealthResult result = (UnderReplicatedHealthResult)
+        healthCheck.checkHealth(request);
+    Assert.assertEquals(HealthState.UNDER_REPLICATED, result.getHealthState());
+    Assert.assertEquals(-1, result.getRemainingRedundancy());
+    Assert.assertFalse(result.isReplicatedOkAfterPending());
+    Assert.assertFalse(result.underReplicatedDueToDecommission());
+    Assert.assertTrue(result.isUnrecoverable());
+    Assert.assertTrue(result.hasUnreplicatedOfflineIndexes());
+
+    Assert.assertTrue(healthCheck.handle(request));
+    // Unrecoverable, but it has offline indexes, so it is still enqueued
+    Assert.assertEquals(1, repQueue.underReplicatedQueueSize());
+    Assert.assertEquals(0, repQueue.overReplicatedQueueSize());
+    Assert.assertEquals(1, report.getStat(
+        ReplicationManagerReport.HealthState.UNDER_REPLICATED));
+    Assert.assertEquals(1, report.getStat(
+        ReplicationManagerReport.HealthState.MISSING));
+  }
+
+  @Test
+  public void testUnderReplicatedAndUnrecoverableWithDecommissionPending() {
+    testUnderReplicatedAndUnrecoverableWithOfflinePending(DECOMMISSIONING);
+  }
+
+  @Test
+  public void testUnderReplicatedAndUnrecoverableWithMaintenancePending() {
+    testUnderReplicatedAndUnrecoverableWithOfflinePending(ENTERING_MAINTENANCE);
+  }
+
+  private void testUnderReplicatedAndUnrecoverableWithOfflinePending(
+      HddsProtos.NodeOperationalState offlineState) {
+    ContainerInfo container = createContainerInfo(repConfig);
+    Set<ContainerReplica> replicas = createReplicas(container.containerID(),
+        Pair.of(IN_SERVICE, 1), Pair.of(offlineState, 2));
+    List<ContainerReplicaOp> pending = new ArrayList<>();
+    pending.add(ContainerReplicaOp.create(
+        ADD, MockDatanodeDetails.randomDatanodeDetails(), 2));
+    ContainerCheckRequest request = requestBuilder
+        .setContainerReplicas(replicas)
+        .setContainerInfo(container)
+        .setPendingOps(pending)
+        .build();
+
+    UnderReplicatedHealthResult result = (UnderReplicatedHealthResult)
+        healthCheck.checkHealth(request);
+    Assert.assertEquals(HealthState.UNDER_REPLICATED, result.getHealthState());
+    Assert.assertEquals(-1, result.getRemainingRedundancy());
+    Assert.assertFalse(result.isReplicatedOkAfterPending());
+    Assert.assertFalse(result.underReplicatedDueToDecommission());
+    Assert.assertTrue(result.isUnrecoverable());
+    Assert.assertFalse(result.hasUnreplicatedOfflineIndexes());
+
+    Assert.assertTrue(healthCheck.handle(request));
+    // Unrecoverable and the offline index has a pending add, so not queued
+    Assert.assertEquals(0, repQueue.underReplicatedQueueSize());
+    Assert.assertEquals(0, repQueue.overReplicatedQueueSize());
+    Assert.assertEquals(1, report.getStat(
+        ReplicationManagerReport.HealthState.UNDER_REPLICATED));
+    Assert.assertEquals(1, report.getStat(
+        ReplicationManagerReport.HealthState.MISSING));
+  }
+
   /**
    * Tests that a closed EC 3-2 container with 3 closed and 2 unhealthy
    * replicas is under replicated.
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
index b5d4d1158d..9b5c348595 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorTestUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import org.apache.commons.lang3.tuple.Triple;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -26,12 +28,14 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
+import 
org.apache.hadoop.hdds.scm.container.replication.ECContainerReplicaCount;
 import 
org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.mockito.Mockito;
 
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -53,20 +57,24 @@ public final class DatanodeAdminMonitorTestUtil {
    * @param containerID The ID the replica is associated with
    * @param nodeState The persistedOpState stored in datanodeDetails.
    * @param replicaState The state of the generated replica.
+   * @param replicaIndex The replica Index for the replica.
+   * @param datanodeDetails The datanode the replica is hosted on.
    * @return A containerReplica with the given ID and state
    */
   public static ContainerReplica generateReplica(
       ContainerID containerID,
       HddsProtos.NodeOperationalState nodeState,
       StorageContainerDatanodeProtocolProtos.ContainerReplicaProto
-          .State replicaState) {
-    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
-    dn.setPersistedOpState(nodeState);
+          .State replicaState,
+      int replicaIndex,
+      DatanodeDetails datanodeDetails) {
+    datanodeDetails.setPersistedOpState(nodeState);
     return ContainerReplica.newBuilder()
         .setContainerState(replicaState)
         .setContainerID(containerID)
         .setSequenceId(1)
-        .setDatanodeDetails(dn)
+        .setDatanodeDetails(datanodeDetails)
+        .setReplicaIndex(replicaIndex)
         .build();
   }
 
@@ -86,7 +94,8 @@ public final class DatanodeAdminMonitorTestUtil {
       HddsProtos.NodeOperationalState...states) {
     Set<ContainerReplica> replicas = new HashSet<>();
     for (HddsProtos.NodeOperationalState s : states) {
-      replicas.add(generateReplica(containerID, s, CLOSED));
+      replicas.add(generateReplica(containerID, s, CLOSED, 0,
+          MockDatanodeDetails.randomDatanodeDetails()));
     }
     ContainerInfo container = new ContainerInfo.Builder()
         .setContainerID(containerID.getId())
@@ -96,6 +105,39 @@ public final class DatanodeAdminMonitorTestUtil {
     return new RatisContainerReplicaCount(container, replicas, 0, 0, 3, 2);
   }
 
+  /**
+   * Create a ContainerReplicaCount object for an EC container, including a
+   * container with the requested ContainerID and state, along with a set of
+   * replicas of the given states.
+   * @param containerID The ID of the container to create an included
+   * @param repConfig The Replication Config for the container
+   * @param containerState The state of the container
+   * @param states Create a replica for each of the given states.
+   * @return A ContainerReplicaCount containing the generated container and
+   *         replica set
+   */
+  public static ContainerReplicaCount generateECReplicaCount(
+      ContainerID containerID, ECReplicationConfig repConfig,
+      HddsProtos.LifeCycleState containerState,
+      Triple<HddsProtos.NodeOperationalState, DatanodeDetails,
+          Integer>...states) {
+
+    Set<ContainerReplica> replicas = new HashSet<>();
+    for (Triple<HddsProtos.NodeOperationalState, DatanodeDetails, Integer> t
+        : states) {
+      replicas.add(generateReplica(containerID, t.getLeft(), CLOSED,
+          t.getRight(), t.getMiddle()));
+    }
+    ContainerInfo container = new ContainerInfo.Builder()
+        .setContainerID(containerID.getId())
+        .setState(containerState)
+        .setReplicationConfig(repConfig)
+        .build();
+
+    return new ECContainerReplicaCount(container, replicas,
+        Collections.emptyList(), 1);
+  }
+
   /**
    * The only interaction the DatanodeAdminMonitor has with the
    * ReplicationManager, is to request a ContainerReplicaCount object for each
@@ -120,6 +162,32 @@ public final class DatanodeAdminMonitorTestUtil {
                 containerState, replicaStates));
   }
 
+  /**
+   * The only interaction the DatanodeAdminMonitor has with the
+   * ReplicationManager, is to request a ContainerReplicaCount object for each
+   * container on nodes being deocmmissioned or moved to maintenance. This
+   * method mocks that interface to return a ContainerReplicaCount with a
+   * container in the given containerState and a set of replias in the given
+   * replicaStates.
+   * @param containerState
+   * @param replicaStates
+   * @throws ContainerNotFoundException
+   */
+  public static void mockGetContainerReplicaCountForEC(
+      ReplicationManager repManager,
+      HddsProtos.LifeCycleState containerState,
+      ECReplicationConfig repConfig,
+      Triple<HddsProtos.NodeOperationalState, DatanodeDetails,
+          Integer>...replicaStates)
+      throws ContainerNotFoundException {
+    reset(repManager);
+    Mockito.when(repManager.getContainerReplicaCount(
+            Mockito.any(ContainerID.class)))
+        .thenAnswer(invocation ->
+            generateECReplicaCount((ContainerID)invocation.getArguments()[0],
+                repConfig, containerState, replicaStates));
+  }
+
   /**
    * This simple internal class is used to track and handle any DatanodeAdmin
    * events fired by the DatanodeAdminMonitor during tests.
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index e2b4e13037..327ae26de2 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import org.apache.commons.lang3.tuple.Triple;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -36,6 +38,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
@@ -206,6 +209,63 @@ public class TestDatanodeAdminMonitor {
         nodeManager.getNodeStatus(dn1).getOperationalState());
   }
 
+  @Test
+  public void testDecommissionNodeWithUnrecoverableECContainer()
+      throws NodeNotFoundException, ContainerNotFoundException {
+    DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+    nodeManager.register(dn1,
+        new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+            HddsProtos.NodeState.HEALTHY));
+
+    nodeManager.setContainers(dn1, generateContainers(1));
+    // Mock Replication Manager to return ContainerReplicaCount's which
+    // always have a DECOMMISSIONED replica.
+    DatanodeAdminMonitorTestUtil
+        .mockGetContainerReplicaCountForEC(
+            repManager,
+            HddsProtos.LifeCycleState.CLOSED,
+            new ECReplicationConfig(3, 2),
+            Triple.of(DECOMMISSIONING, dn1, 1),
+            Triple.of(IN_SERVICE,
+                MockDatanodeDetails.randomDatanodeDetails(), 2));
+
+    // Run the monitor for the first time and the node will transition to
+    // REPLICATE_CONTAINERS as there are no pipelines to close.
+    monitor.startMonitoring(dn1);
+    monitor.run();
+    DatanodeDetails node = getFirstTrackedNode();
+    assertEquals(1, monitor.getTrackedNodeCount());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dn1).getOperationalState());
+
+    // Running the monitor again causes it to remain DECOMMISSIONING
+    // as nothing has changed.
+    monitor.run();
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+        nodeManager.getNodeStatus(dn1).getOperationalState());
+
+    // Now change the replicationManager mock another copy of the
+    // decommissioning replica on an IN_SERVICE node and the node should
+    // complete the REPLICATE_CONTAINERS step, moving to complete which will 
end
+    // the decommission workflow
+    DatanodeAdminMonitorTestUtil
+        .mockGetContainerReplicaCountForEC(
+            repManager,
+            HddsProtos.LifeCycleState.CLOSED,
+            new ECReplicationConfig(3, 2),
+            Triple.of(DECOMMISSIONING, dn1, 1),
+            Triple.of(IN_SERVICE,
+                MockDatanodeDetails.randomDatanodeDetails(), 2),
+            Triple.of(IN_SERVICE,
+                MockDatanodeDetails.randomDatanodeDetails(), 1));
+
+    monitor.run();
+
+    assertEquals(0, monitor.getTrackedNodeCount());
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+        nodeManager.getNodeStatus(dn1).getOperationalState());
+  }
+
   @Test
   public void testDecommissionAbortedWhenNodeInUnexpectedState()
       throws NodeNotFoundException, ContainerNotFoundException {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to