This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 5d578d0  HDDS-1004. SCMContainerManager#updateContainerStateInternal fails for QUASI_CLOSE and FORCE_CLOSE events. Contributed by Lokesh Jain.
5d578d0 is described below

commit 5d578d0c4a0d9184dc5d54589ecafc91c0ec16cc
Author: Shashikant Banerjee <shashik...@apache.org>
AuthorDate: Tue Jan 29 14:11:56 2019 +0530

    HDDS-1004. SCMContainerManager#updateContainerStateInternal fails for QUASI_CLOSE and FORCE_CLOSE events. Contributed by Lokesh Jain.
---
 .../hdds/scm/container/ContainerReportHandler.java | 25 +++++++++++-----------
 .../hdds/scm/container/SCMContainerManager.java    | 21 ------------------
 .../hdds/scm/pipeline/RatisPipelineUtils.java      |  2 +-
 3 files changed, 13 insertions(+), 35 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 0170caa..4500786 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -127,8 +127,8 @@ public class ContainerReportHandler implements
                 }
               });
         } catch (ContainerNotFoundException e) {
-          LOG.warn("Cannot remove container replica, container {} not found",
-              id);
+          LOG.warn("Cannot remove container replica, container {} not found {}",
+              id, e);
         }
       }
 
@@ -140,8 +140,8 @@ public class ContainerReportHandler implements
       missingReplicas.forEach(id -> checkReplicationState(id, publisher));
 
     } catch (NodeNotFoundException ex) {
-      LOG.error("Received container report from unknown datanode {}",
-          datanodeDetails);
+      LOG.error("Received container report from unknown datanode {} {}",
+          datanodeDetails, ex);
     }
 
   }
@@ -170,12 +170,13 @@ public class ContainerReportHandler implements
                   containerInfo.getContainerID());
         }
       } catch (ContainerNotFoundException e) {
-        LOG.error("Received container report for an unknown container {} from" +
-                " datanode {}", replicaProto.getContainerID(), datanodeDetails);
+        LOG.error("Received container report for an unknown container {} from"
+                + " datanode {} {}", replicaProto.getContainerID(),
+            datanodeDetails, e);
       } catch (IOException e) {
-        LOG.error("Exception while processing container report for container" +
-                " {} from datanode {}",
-            replicaProto.getContainerID(), datanodeDetails);
+        LOG.error("Exception while processing container report for container"
+                + " {} from datanode {} {}", replicaProto.getContainerID(),
+            datanodeDetails, e);
       }
     }
     if (pendingDeleteStatusList.getNumPendingDeletes() > 0) {
@@ -190,10 +191,8 @@ public class ContainerReportHandler implements
       ContainerInfo container = containerManager.getContainer(containerID);
       replicateIfNeeded(container, publisher);
     } catch (ContainerNotFoundException ex) {
-      LOG.warn(
-          "Container is missing from containerStateManager. Can't request "
-              + "replication. {}",
-          containerID);
+      LOG.warn("Container is missing from containerStateManager. Can't request "
+          + "replication. {} {}", containerID, ex);
     }
 
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 6c7031d..6ab4cdf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -313,27 +313,6 @@ public class SCMContainerManager implements ContainerManager {
 
   private ContainerInfo updateContainerStateInternal(ContainerID containerID,
       HddsProtos.LifeCycleEvent event) throws IOException {
-    // Refactor the below code for better clarity.
-    switch (event) {
-    case FINALIZE:
-      // TODO: we don't need a lease manager here for closing as the
-      // container report will include the container state after HDFS-13008
-      // If a client failed to update the container close state, DN container
-      // report from 3 DNs will be used to close the container eventually.
-      break;
-    case CLOSE:
-      break;
-    case DELETE:
-      break;
-    case CLEANUP:
-      break;
-    default:
-      throw new SCMException("Unsupported container LifeCycleEvent.",
-          FAILED_TO_CHANGE_CONTAINER_STATE);
-    }
-    // If the below updateContainerState call fails, we should revert the
-    // changes made in switch case.
-    // Like releasing the lease in case of BEGIN_CREATE.
     return containerStateManager.updateContainerState(containerID, event);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 58ca1fd..26ba1f4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -106,7 +106,7 @@ public final class RatisPipelineUtils {
       Pipeline pipeline, Configuration ozoneConf, boolean onTimeout)
       throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
-    LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
+    LOG.info("destroying pipeline:{} with {}", pipeline.getId(), group);
     pipelineManager.finalizePipeline(pipeline.getId());
     if (onTimeout) {
       long pipelineDestroyTimeoutInMillis = ozoneConf


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to