sodonnel commented on code in PR #4025:
URL: https://github.com/apache/ozone/pull/4025#discussion_r1037307309


##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisUnderReplicationHandler.java:
##########
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * This class handles Ratis containers that are under replicated. It should
+ * be used to obtain SCMCommands that can be sent to datanodes to solve
+ * under replication.
+ */
+public class RatisUnderReplicationHandler
+    implements UnhealthyReplicationHandler {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(RatisUnderReplicationHandler.class);
+  private final PlacementPolicy placementPolicy;
+  private final NodeManager nodeManager;
+  private final long currentContainerSize;
+
+  public RatisUnderReplicationHandler(final PlacementPolicy placementPolicy,
+      final ConfigurationSource conf, final NodeManager nodeManager) {
+    this.placementPolicy = placementPolicy;
+    this.currentContainerSize = (long) conf
+        .getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+            ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+    this.nodeManager = nodeManager;
+  }
+
+  /**
+   * Identifies new set of datanodes as targets for container replication.
+   * Forms the SCMCommands to be sent to these datanodes.
+   *
+   * @param replicas Set of container replicas.
+   * @param pendingOps Pending ContainerReplicaOp including adds and deletes
+   *                   for this container.
+   * @param result Health check result indicating under replication.
+   * @param minHealthyForMaintenance Number of healthy replicas that must be
+   *                                 available for a DN to enter maintenance
+   *
+   * @return Returns the key value pair of destination dn where the command gets
+   * executed and the command itself. If an empty map is returned, it indicates
+   * the container is no longer unhealthy and can be removed from the unhealthy
+   * queue. Any exception indicates that the container is still unhealthy and
+   * should be retried later.
+   */
+  @Override
+  public Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands(
+      Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
+      ContainerHealthResult result, int minHealthyForMaintenance)
+      throws IOException {
+    ContainerInfo containerInfo = result.getContainerInfo();
+    LOG.debug("Handling under replicated Ratis container {}", containerInfo);
+
+    // count pending adds and deletes
+    int pendingAdd = 0, pendingDelete = 0;
+    for (ContainerReplicaOp op : pendingOps) {
+      if (op.getOpType() == ContainerReplicaOp.PendingOpType.ADD) {
+        pendingAdd++;
+      } else if (op.getOpType() == ContainerReplicaOp.PendingOpType.DELETE) {
+        pendingDelete++;
+      }
+    }
+    RatisContainerReplicaCount replicaCount =
+        new RatisContainerReplicaCount(containerInfo, replicas, pendingAdd,
+            pendingDelete, containerInfo.getReplicationFactor().getNumber(),
+            minHealthyForMaintenance);
+
+    // verify that this container is still under replicated and we don't have
+    // sufficient replication after considering pending adds
+    if (!verifyUnderReplication(replicaCount)) {
+      return Collections.emptyMap();
+    }
+
+    // find sources that can provide replicas
+    List<DatanodeDetails> sourceDatanodes =
+        getSources(replicaCount, pendingOps);
+    if (sourceDatanodes.isEmpty()) {
+      LOG.warn("Cannot replicate container {} because no healthy replicas " +
+          "were found.", containerInfo);
+      return Collections.emptyMap();
+    }
+
+    // find targets to send replicas to
+    List<DatanodeDetails> targetDatanodes =
+        getTargets(replicaCount, pendingOps);
+    if (targetDatanodes.isEmpty()) {
+      LOG.warn("Cannot replicate container {} because no eligible targets " +
+          "were found.", containerInfo);
+      return Collections.emptyMap();
+    }
+
+    return createReplicationCommands(containerInfo.getContainerID(),
+        sourceDatanodes, targetDatanodes);
+  }
+
+  /**
+   * Verify that this container is under replicated, even after considering
+   * pending adds. Note that the container might be under replicated but
+   * unrecoverable (no replicas), in which case this returns false.
+   *
+   * @param replicaCount RatisContainerReplicaCount object to check
+   * @return true if the container is under replicated, false if the
+   * container is sufficiently replicated or unrecoverable.
+   */
+  private boolean verifyUnderReplication(
+      RatisContainerReplicaCount replicaCount) {
+    if (replicaCount.isSufficientlyReplicated()) {
+      LOG.info("The container {} state changed and it's not under " +
+          "replicated any more.", replicaCount.getContainer().containerID());
+      return false;
+    }
+    if (replicaCount.isSufficientlyReplicated(true)) {
+      LOG.info("Container {} with replicas {} will be sufficiently " +
+              "replicated after pending replicas are created.",
+          replicaCount.getContainer().getContainerID(),
+          replicaCount.getReplicas());
+      return false;
+    }
+    if (replicaCount.getReplicas().isEmpty()) {
+      LOG.warn("Container {} does not have any replicas and is unrecoverable" +
+          ".", replicaCount.getContainer());
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Returns a list of datanodes that can be used as sources for replication
+   * for the container specified in replicaCount.
+   *
+   * @param replicaCount RatisContainerReplicaCount object for this container
+   * @param pendingOps List of pending ContainerReplicaOp
+   * @return List of healthy datanodes that have closed/quasi-closed replicas
+   * and are not pending replica deletion. Sorted in descending order of
+   * sequence id.
+   */
+  private List<DatanodeDetails> getSources(
+      RatisContainerReplicaCount replicaCount,
+      List<ContainerReplicaOp> pendingOps) {
+    Set<DatanodeDetails> pendingDeletion = new HashSet<>();
+    // collect the DNs that are going to have their container replica deleted
+    for (ContainerReplicaOp op : pendingOps) {
+      if (op.getOpType() == ContainerReplicaOp.PendingOpType.DELETE) {
+        pendingDeletion.add(op.getTarget());
+      }
+    }
+
+    /*
+     * Return healthy datanodes that have closed/quasi-closed replicas and
+     * are not pending replica deletion. Sorted in descending order of
+     * sequence id.
+     */
+    return replicaCount.getReplicas().stream()
+        .filter(r -> r.getState() == State.QUASI_CLOSED ||
+            r.getState() == State.CLOSED)
+        .filter(r -> ReplicationManager.getNodeStatus(r.getDatanodeDetails(),
+            nodeManager).isHealthy())
+        .filter(r -> !pendingDeletion.contains(r.getDatanodeDetails()))
+        .sorted((r1, r2) -> r2.getSequenceId().compareTo(r1.getSequenceId()))
+        .map(ContainerReplica::getDatanodeDetails)
+        .collect(Collectors.toList());
+  }
+
+  private List<DatanodeDetails> getTargets(
+      RatisContainerReplicaCount replicaCount,
+      List<ContainerReplicaOp> pendingOps) throws IOException {
+    // DNs that already have replicas cannot be targets and should be excluded
+    final List<DatanodeDetails> excludeList =
+        replicaCount.getReplicas().stream()
+            .map(ContainerReplica::getDatanodeDetails)
+            .collect(Collectors.toList());
+
+    // DNs that are already waiting to receive replicas cannot be targets
+    final List<DatanodeDetails> pendingReplication =
+        pendingOps.stream()
+            .filter(containerReplicaOp -> containerReplicaOp.getOpType() ==
+                ContainerReplicaOp.PendingOpType.ADD)
+            .map(ContainerReplicaOp::getTarget)
+            .collect(Collectors.toList());
+    excludeList.addAll(pendingReplication);
+
+    /*
+    Ensure that target datanodes have enough space to hold a complete
+    container.
+    */
+    final long dataSizeRequired =
+        Math.max(replicaCount.getContainer().getUsedBytes(),
+            currentContainerSize);
+    return placementPolicy.chooseDatanodes(excludeList, null,
+        replicaCount.additionalReplicaNeeded(), 0, dataSizeRequired);
+
+    // TODO Now that under replication handling is separate from mis

Review Comment:
   We probably need to review how the placement policy works, but I think what it *should* do is:
   
   1. If there are multiple racks online, it must either give a node which meets the policy,
   2. or throw SCMException with insufficient nodes, which means we cannot recover right now.
   3. If there are not enough racks online, e.g. a 2 rack cluster with one rack offline, then it should give a node which violates the policy. This is because fixing under replication is more important than causing mis-replication.
   
   It's not a great design if we have to check again here that the node we are given actually meets the policy.
   
   An alternative to the above is to have an "allowed to violate policy" flag we can pass into the policy. That way, choosing nodes would always throw if no node meets the policy, and we could catch that and try again with the flag set to true, but that may be overkill.
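   
   For illustration, a minimal sketch of that catch-and-retry alternative, assuming a hypothetical `chooseDatanodes` overload that takes an extra "allow policy violation" boolean (no such overload exists in the current `PlacementPolicy` interface) and reusing this handler's `placementPolicy` and `LOG` fields:
   
   ```java
   // Sketch only: the trailing boolean parameter on chooseDatanodes is
   // hypothetical and does not exist in the current PlacementPolicy interface.
   private List<DatanodeDetails> chooseTargetsWithFallback(
       List<DatanodeDetails> excludeList, int required, long dataSizeRequired)
       throws SCMException {
     try {
       // First pass: only accept nodes that fully satisfy the placement policy.
       return placementPolicy.chooseDatanodes(
           excludeList, null, required, 0, dataSizeRequired);
     } catch (SCMException e) {
       // Not enough policy-compliant nodes, e.g. only one rack is online.
       // Fixing under replication matters more than avoiding mis-replication,
       // so retry while permitting the policy to be violated.
       LOG.warn("No targets satisfying the placement policy were found; " +
           "retrying and allowing policy violations.", e);
       return placementPolicy.chooseDatanodes(
           excludeList, null, required, 0, dataSizeRequired,
           true /* hypothetical allowPolicyViolation flag */);
     }
   }
   ```
   
   This would keep the common path strict while still letting under replication be fixed on degraded clusters, at the cost of a second placement attempt in the failure case.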



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
