HDDS-366. Update functions impacted by SCM chill mode in StorageContainerLocationProtocol. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/295cce39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/295cce39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/295cce39

Branch: refs/heads/YARN-7402
Commit: 295cce39edfce1e81d3a1fdcb4e70442c4f60c18
Parents: 2cf8927
Author: Xiaoyu Yao <x...@apache.org>
Authored: Tue Sep 18 13:28:04 2018 -0700
Committer: Xiaoyu Yao <x...@apache.org>
Committed: Tue Sep 18 13:32:48 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/src/main/proto/hdds.proto    |  7 +++
 .../org/apache/hadoop/hdds/scm/ScmUtils.java    | 45 +++++++++++++++
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 17 +-----
 .../hdds/scm/container/ContainerMapping.java    |  6 --
 .../hdds/scm/server/ChillModePrecheck.java      | 12 ++--
 .../hdds/scm/server/SCMChillModeManager.java    |  1 +
 .../scm/server/SCMClientProtocolServer.java     | 53 ++++++++++++++++-
 .../scm/server/StorageContainerManager.java     | 56 ++++++++++--------
 .../scm/container/TestContainerMapping.java     |  8 ---
 .../scm/server/TestSCMClientProtocolServer.java | 60 ++++++++++++++++++++
 .../ozone/TestStorageContainerManager.java      | 55 ++++++++++++++++++
 .../hadoop/ozone/scm/TestContainerSQLCli.java   | 15 +++--
 12 files changed, 273 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index 41f1851..dedc57b 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -176,6 +176,13 @@ enum ScmOps {
     keyBlocksInfoList = 2;
     getScmInfo = 3;
     deleteBlock = 4;
+    createReplicationPipeline = 5;
+    allocateContainer = 6;
+    getContainer = 7;
+    getContainerWithPipeline = 8;
+    listContainer = 9;
+    deleteContainer = 10;
+    queryNode = 11;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
new file mode 100644
index 0000000..435f0a5
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.server.Precheck;
+
+/**
+ * SCM utility class.
+ */
+public final class ScmUtils {
+
+  private ScmUtils() {
+  }
+
+  /**
+   * Perform all prechecks for the given SCM operation.
+   *
+   * @param operation SCM operation to be performed
+   * @param preChecks prechecks to be performed
+   */
+  public static void preCheck(ScmOps operation, Precheck... preChecks)
+      throws SCMException {
+    for (Precheck preCheck : preChecks) {
+      preCheck.check(operation);
+    }
+  }
+}
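
Note: ScmUtils.preCheck centralizes the precheck loop that previously lived in BlockManagerImpl (removed further down). A minimal sketch of how a caller can guard an operation with it, using only the types introduced or touched by this patch (the class and method names in the sketch itself are hypothetical):

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
    import org.apache.hadoop.hdds.scm.ScmUtils;
    import org.apache.hadoop.hdds.scm.exceptions.SCMException;
    import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;

    /** Hypothetical caller of the new helper; not part of this patch. */
    public final class ChillModeGuardedAllocator {
      // Flag holder; flipped to false once SCM leaves chill mode.
      private final ChillModePrecheck chillModePrecheck = new ChillModePrecheck();

      public void allocate() throws SCMException {
        // Throws SCMException(CHILL_MODE_EXCEPTION) while the op is restricted.
        ScmUtils.preCheck(ScmOps.allocateContainer, chillModePrecheck);
        // ... the actual allocation would run only after the precheck passes.
      }
    }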

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 3405b0d..d383c68 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmUtils;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -30,7 +31,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;
-import org.apache.hadoop.hdds.scm.server.Precheck;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -190,7 +190,7 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
       ReplicationType type, ReplicationFactor factor, String owner)
       throws IOException {
     LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor);
-    preCheck(ScmOps.allocateBlock, chillModePrecheck);
+    ScmUtils.preCheck(ScmOps.allocateBlock, chillModePrecheck);
     if (size < 0 || size > containerSize) {
       LOG.warn("Invalid block size requested : {}", size);
       throw new SCMException("Unsupported block size: " + size,
@@ -417,19 +417,6 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
     return this.blockDeletingService;
   }
 
-  /**
-   * Perform all prechecks for given operations.
-   *
-   * @param operation
-   * @param preChecks prechecks to be performed
-   */
-  public void preCheck(ScmOps operation, Precheck... preChecks)
-      throws SCMException {
-    for (Precheck preCheck : preChecks) {
-      preCheck.check(operation);
-    }
-  }
-
   @Override
   public void onMessage(Boolean inChillMode, EventPublisher publisher) {
     this.chillModePrecheck.setInChillMode(inChillMode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 11cc9ee..206e24b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -274,12 +274,6 @@ public class ContainerMapping implements Mapping {
     ContainerInfo containerInfo;
     ContainerWithPipeline containerWithPipeline;
 
-    if (!nodeManager.isOutOfChillMode()) {
-      throw new SCMException(
-          "Unable to create container while in chill mode",
-          SCMException.ResultCodes.CHILL_MODE_EXCEPTION);
-    }
-
     lock.lock();
     try {
       containerWithPipeline = containerStateManager.allocateContainer(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java
index 81556fa..b92413e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ChillModePrecheck.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.server;
 
+import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
@@ -28,15 +29,16 @@ import org.apache.hadoop.hdds.scm.server.SCMChillModeManager.ChillModeRestricted
  * */
 public class ChillModePrecheck implements Precheck<ScmOps> {
 
-  private boolean inChillMode;
+  private AtomicBoolean inChillMode = new AtomicBoolean(true);
   public static final String PRECHECK_TYPE = "ChillModePrecheck";
 
   public boolean check(ScmOps op) throws SCMException {
-    if(inChillMode && ChillModeRestrictedOps.isRestrictedInChillMode(op)) {
+    if (inChillMode.get() && ChillModeRestrictedOps
+        .isRestrictedInChillMode(op)) {
       throw new SCMException("ChillModePrecheck failed for " + op,
           ResultCodes.CHILL_MODE_EXCEPTION);
     }
-    return inChillMode;
+    return inChillMode.get();
   }
 
   @Override
@@ -45,10 +47,10 @@ public class ChillModePrecheck implements Precheck<ScmOps> {
   }
 
   public boolean isInChillMode() {
-    return inChillMode;
+    return inChillMode.get();
   }
 
   public void setInChillMode(boolean inChillMode) {
-    this.inChillMode = inChillMode;
+    this.inChillMode.set(inChillMode);
   }
 }
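
Note: the flag becomes an AtomicBoolean because it is now flipped by the CHILL_MODE_STATUS event handlers while being read concurrently by RPC handler threads, and it defaults to true so restricted operations are rejected until chill mode is explicitly cleared. A small sketch of that behaviour (not part of the patch; the class name is hypothetical):

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
    import org.apache.hadoop.hdds.scm.exceptions.SCMException;
    import org.apache.hadoop.hdds.scm.server.ChillModePrecheck;

    public final class ChillModePrecheckSketch {
      public static void main(String[] args) throws SCMException {
        ChillModePrecheck precheck = new ChillModePrecheck();
        try {
          // Restricted op while still in chill mode (the new default) -> throws.
          precheck.check(ScmOps.allocateContainer);
        } catch (SCMException expected) {
          // "ChillModePrecheck failed for allocateContainer"
        }
        // Cleared by the CHILL_MODE_STATUS handlers via setInChillMode(false).
        precheck.setInChillMode(false);
        precheck.check(ScmOps.allocateContainer); // passes once chill mode is off
      }
    }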

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java
index 0b7bfda..b35ac1b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMChillModeManager.java
@@ -234,6 +234,7 @@ public class SCMChillModeManager implements
 
     static {
       restrictedOps.add(ScmOps.allocateBlock);
+      restrictedOps.add(ScmOps.allocateContainer);
     }
 
     public static boolean isRestrictedInChillMode(ScmOps opName) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 77e495d..3f1943c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -27,15 +27,21 @@ import com.google.protobuf.BlockingService;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.ScmUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
@@ -71,13 +77,14 @@ import static org.apache.hadoop.hdds.scm.server.StorageContainerManager
  * The RPC server that listens to requests from clients.
  */
 public class SCMClientProtocolServer implements
-    StorageContainerLocationProtocol {
+    StorageContainerLocationProtocol, EventHandler<Boolean> {
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMClientProtocolServer.class);
   private final RPC.Server clientRpcServer;
   private final InetSocketAddress clientRpcAddress;
   private final StorageContainerManager scm;
   private final OzoneConfiguration conf;
+  private ChillModePrecheck chillModePrecheck = new ChillModePrecheck();
 
   public SCMClientProtocolServer(OzoneConfiguration conf,
       StorageContainerManager scm) throws IOException {
@@ -149,6 +156,7 @@ public class SCMClientProtocolServer implements
   public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
       replicationType, HddsProtos.ReplicationFactor factor,
       String owner) throws IOException {
+    ScmUtils.preCheck(ScmOps.allocateContainer, chillModePrecheck);
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
 
@@ -167,12 +175,39 @@ public class SCMClientProtocolServer implements
   @Override
   public ContainerWithPipeline getContainerWithPipeline(long containerID)
       throws IOException {
+    if (chillModePrecheck.isInChillMode()) {
+      ContainerInfo contInfo = scm.getScmContainerManager()
+          .getContainer(containerID);
+      if (contInfo.isContainerOpen()) {
+        if (!hasRequiredReplicas(contInfo)) {
+          throw new SCMException("Open container " + containerID + " doesn't"
+              + " have enough replicas to service this operation in "
+              + "Chill mode.", ResultCodes.CHILL_MODE_EXCEPTION);
+        }
+      }
+    }
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
     return scm.getScmContainerManager()
         .getContainerWithPipeline(containerID);
   }
 
+  /**
+   * Check if container reported replicas are equal or greater than required
+   * replication factor.
+   */
+  private boolean hasRequiredReplicas(ContainerInfo contInfo) {
+    try {
+      return getScm().getScmContainerManager().getStateManager()
+          .getContainerReplicas(contInfo.containerID())
+          .size() >= contInfo.getReplicationFactor().getNumber();
+    } catch (SCMException ex) {
+      // getContainerReplicas throws an exception if no replicas exist for the given
+      // container.
+      return false;
+    }
+  }
+
   @Override
   public List<ContainerInfo> listContainer(long startContainerID,
       int count) throws IOException {
@@ -291,6 +326,22 @@ public class SCMClientProtocolServer implements
   }
 
   /**
+   * Set chill mode status based on SCMEvents.CHILL_MODE_STATUS event.
+   */
+  @Override
+  public void onMessage(Boolean inChillMode, EventPublisher publisher) {
+    chillModePrecheck.setInChillMode(inChillMode);
+  }
+
+  /**
+   * Returns the current chill mode status.
+   */
+  public boolean getChillModeStatus() {
+    return chillModePrecheck.isInChillMode();
+  }
+
+
+  /**
    * Query the System for Nodes.
    *
    * @param nodeState - NodeState that we are interested in matching.
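
Note: SCMClientProtocolServer now implements EventHandler<Boolean>, so its chill mode flag is driven through the event queue rather than set directly. A condensed sketch of the wiring, mirroring the new unit test further down (including its assumption of passing null for the SCM instance; the class name here is hypothetical):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.events.SCMEvents;
    import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
    import org.apache.hadoop.hdds.server.events.EventQueue;

    public final class ChillModeStatusWiringSketch {
      public static void main(String[] args) throws Exception {
        EventQueue eventQueue = new EventQueue();
        SCMClientProtocolServer server =
            new SCMClientProtocolServer(new OzoneConfiguration(), null);
        eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, server);

        // Delivery is asynchronous; callers poll getChillModeStatus() rather
        // than asserting immediately after fireEvent.
        eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
      }
    }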

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 9216e5d..8e76606 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineCloseHandler;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineActionEventHandler;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -241,29 +242,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     PipelineCloseHandler pipelineCloseHandler =
         new PipelineCloseHandler(scmContainerManager);
 
-    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
-    eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
-    eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
-    eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
-    eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
-    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
-    eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
-    eventQueue.addHandler(SCMEvents.START_REPLICATION,
-        replicationStatus.getReplicationStatusListener());
-    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
-        replicationStatus.getChillModeStatusListener());
-    eventQueue
-        .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
-    eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS,
-        pipelineActionEventHandler);
-    eventQueue.addHandler(SCMEvents.PIPELINE_CLOSE, pipelineCloseHandler);
-    eventQueue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-        scmChillModeManager);
-    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
-        (BlockManagerImpl) scmBlockManager);
-
     long watcherTimeout =
         conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
             HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
@@ -298,6 +276,31 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     blockProtocolServer = new SCMBlockProtocolServer(conf, this);
     clientProtocolServer = new SCMClientProtocolServer(conf, this);
     httpServer = new StorageContainerManagerHttpServer(conf);
+
+    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
+    eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
+    eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
+    eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
+    eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
+    eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
+    eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
+    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
+    eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
+    eventQueue.addHandler(SCMEvents.START_REPLICATION,
+        replicationStatus.getReplicationStatusListener());
+    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
+        replicationStatus.getChillModeStatusListener());
+    eventQueue
+        .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
+    eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS,
+        pipelineActionEventHandler);
+    eventQueue.addHandler(SCMEvents.PIPELINE_CLOSE, pipelineCloseHandler);
+    eventQueue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
+        scmChillModeManager);
+    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
+        (BlockManagerImpl) scmBlockManager);
+    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, clientProtocolServer);
+
     registerMXBean();
   }
 
@@ -830,6 +833,13 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     return scmChillModeManager.getInChillMode();
   }
 
+  /**
+   * Returns EventPublisher.
+   */
+  public EventPublisher getEventQueue() {
+    return eventQueue;
+  }
+
   @VisibleForTesting
   public double getCurrentContainerThreshold() {
     return scmChillModeManager.getCurrentContainerThreshold();
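
Note: registering the handlers after the protocol servers are constructed lets the client protocol server join the CHILL_MODE_STATUS listeners, and the new getEventQueue() accessor lets integration tests drive the flag directly. A short sketch of that use (the scm instance is assumed to come from a running MiniOzoneCluster, as in the new integration test; class and method names are hypothetical):

    import org.apache.hadoop.hdds.scm.events.SCMEvents;
    import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

    final class ChillModeStatusFanOutSketch {
      /** Re-enters chill mode; the event reaches the block manager, the
       *  replication status listener and the client protocol server. */
      static void reenterChillMode(StorageContainerManager scm) {
        scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
      }
    }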

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index 481f94c..224f6ddd 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -206,14 +206,6 @@ public class TestContainerMapping {
   }
 
   @Test
-  public void testChillModeAllocateContainerFails() throws IOException {
-    nodeManager.setChillmode(true);
-    thrown.expectMessage("Unable to create container while in chill mode");
-    mapping.allocateContainer(xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
-  }
-
-  @Test
   public void testContainerCreationLeaseTimeout() throws IOException,
       InterruptedException {
     nodeManager.setChillmode(false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
new file mode 100644
index 0000000..4b20018
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for @{@link SCMClientProtocolServer}.
+ * */
+public class TestSCMClientProtocolServer {
+  private SCMClientProtocolServer scmClientProtocolServer;
+  private OzoneConfiguration config;
+  private EventQueue eventQueue;
+
+  @Before
+  public void setUp() throws Exception {
+    config = new OzoneConfiguration();
+    eventQueue = new EventQueue();
+    scmClientProtocolServer = new SCMClientProtocolServer(config, null);
+    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, scmClientProtocolServer);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testAllocateContainerFailureInChillMode() throws Exception {
+    LambdaTestUtils.intercept(SCMException.class,
+        "ChillModePrecheck failed for allocateContainer", () -> {
+          scmClientProtocolServer.allocateContainer(
+              ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "");
+        });
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 1364d77..3d9a043 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -43,6 +43,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -53,6 +55,7 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.server.SCMChillModeManager;
@@ -66,6 +69,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -551,4 +555,55 @@ public class TestStorageContainerManager {
     cluster.shutdown();
   }
 
+  @Test
+  public void testSCMChillModeRestrictedOp() throws Exception {
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
+        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB);
+
+    MiniOzoneClusterImpl cluster = (MiniOzoneClusterImpl) MiniOzoneCluster
+        .newBuilder(conf)
+        .setHbInterval(1000)
+        .setHbProcessorInterval(500)
+        .setStartDataNodes(false)
+        .build();
+
+    StorageContainerManager scm = cluster.getStorageContainerManager();
+    assertTrue(scm.isInChillMode());
+
+    LambdaTestUtils.intercept(SCMException.class,
+        "ChillModePrecheck failed for allocateContainer", () -> {
+          scm.getClientProtocolServer()
+              .allocateContainer(ReplicationType.STAND_ALONE,
+                  ReplicationFactor.ONE, "");
+        });
+
+    cluster.startHddsDatanodes();
+    cluster.waitForClusterToBeReady();
+    assertFalse(scm.isInChillMode());
+
+    TestStorageContainerManagerHelper helper =
+        new TestStorageContainerManagerHelper(cluster, conf);
+    helper.createKeys(10, 4096);
+    SCMClientProtocolServer clientProtocolServer = cluster
+        .getStorageContainerManager().getClientProtocolServer();
+
+    final List<ContainerInfo> containers = scm.getScmContainerManager()
+        .getStateManager().getAllContainers();
+    scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
+    assertFalse((scm.getClientProtocolServer()).getChillModeStatus());
+    GenericTestUtils.waitFor(() -> {
+      return clientProtocolServer.getChillModeStatus();
+    }, 50, 1000 * 5);
+    assertTrue(clientProtocolServer.getChillModeStatus());
+
+    LambdaTestUtils.intercept(SCMException.class,
+        "Open container " + containers.get(0).getContainerID() + " "
+            + "doesn't have enough replicas to service this operation in Chill"
+            + " mode.", () -> clientProtocolServer
+            .getContainerWithPipeline(containers.get(0).getContainerID()));
+    cluster.shutdown();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/295cce39/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 65bd036..4026348 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.scm;
 
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -60,6 +61,8 @@ import static org.junit.Assert.assertEquals;
 @RunWith(Parameterized.class)
 public class TestContainerSQLCli {
 
+  private EventQueue eventQueue;
+
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
     return Arrays.asList(new Object[][] {
@@ -114,12 +117,16 @@ public class TestContainerSQLCli {
         .getDatanodeDetails().getIpAddress();
     cluster.getOzoneManager().stop();
     cluster.getStorageContainerManager().stop();
-
+    eventQueue = new EventQueue();
     nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
     mapping = new ContainerMapping(conf, nodeManager, 128,
-        new EventQueue());
-    blockManager = new BlockManagerImpl(conf, nodeManager, mapping, null);
-
+        eventQueue);
+    blockManager = new BlockManagerImpl(conf, nodeManager, mapping, eventQueue);
+    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
+    eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
+    GenericTestUtils.waitFor(() -> {
+      return !blockManager.isScmInChillMode();
+    }, 10, 1000 * 15);
     // blockManager.allocateBlock() will create containers if there is none
     // stored in levelDB. The number of containers to create is the value of
     // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE which we set to 2.

