Repository: hadoop
Updated Branches:
  refs/heads/trunk ad5256e44 -> c4d964002


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 631283c..1ff6655 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
@@ -960,7 +959,7 @@ public class TestNodeManager {
                   TestUtils.getRandomPipelineReports());
       eq.fireEvent(DATANODE_COMMAND,
           new CommandForDatanode<>(datanodeDetails.getUuid(),
-              new CloseContainerCommand(1L, ReplicationType.STAND_ALONE,
+              new CloseContainerCommand(1L,
                   PipelineID.randomId())));
 
       eq.processAll(1000L);
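
The hunk above exercises the central API change in this commit: CloseContainerCommand no longer carries a ReplicationType, because the datanode now derives the transport from the pipeline. As a consolidated sketch, assuming the test's event queue (eq) and datanodeDetails fixtures from the surrounding code:

    // Two-argument form after this patch: container ID plus pipeline ID.
    // The ReplicationType parameter is gone from the constructor.
    eq.fireEvent(DATANODE_COMMAND,
        new CommandForDatanode<>(datanodeDetails.getUuid(),
            new CloseContainerCommand(1L, PipelineID.randomId())));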

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 0f35607..16c9f22 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -21,7 +21,6 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
@@ -66,6 +65,7 @@ import org.apache.hadoop.ozone.container.common.states.endpoint
 import org.apache.hadoop.ozone.container.common.states.endpoint
     .VersionEndpointTask;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -75,6 +75,7 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.mockito.Mockito.mock;
@@ -309,8 +310,10 @@ public class TestEndPoint {
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
     when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
         .createNodeReport(getStorageReports(UUID.randomUUID())));
-    when(ozoneContainer.getContainerReport()).thenReturn(
+    ContainerController controller = Mockito.mock(ContainerController.class);
+    when(controller.getContainerReport()).thenReturn(
         TestUtils.getRandomContainerReports(10));
+    when(ozoneContainer.getController()).thenReturn(controller);
     when(ozoneContainer.getPipelineReport()).thenReturn(
             TestUtils.getRandomPipelineReports());
     RegisterEndpointTask endpointTask =
@@ -433,7 +436,6 @@ public class TestEndPoint {
         .setCloseContainerCommandProto(
             CloseContainerCommandProto.newBuilder().setCmdId(1)
         .setContainerID(1)
-        .setReplicationType(ReplicationType.RATIS)
         .setPipelineID(PipelineID.randomId().getProtobuf())
         .build())
         .setCommandType(Type.closeContainerCommand)
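
Container reports now flow through a ContainerController instead of coming from OzoneContainer directly, which is why the mock above gains an extra hop. The stubbing pattern in isolation, a sketch assuming the accessors this diff introduces:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
    import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;

    OzoneContainer ozoneContainer = mock(OzoneContainer.class);
    ContainerController controller = mock(ContainerController.class);
    // Reports are stubbed on the controller ...
    when(controller.getContainerReport())
        .thenReturn(TestUtils.getRandomContainerReports(10));
    // ... and the container is stubbed to hand that controller out.
    when(ozoneContainer.getController()).thenReturn(controller);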

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
index fb58520..0162f9e 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
@@ -42,6 +42,6 @@ Check webui static resources
                        Should contain         ${result}    200
 
 Start freon testing
-    ${result} =        Execute              ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 10
+    ${result} =        Execute              ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 1
                       Wait Until Keyword Succeeds      3min       10sec     Should contain   ${result}   Number of Keys added: 125
                        Should Not Contain               ${result}  ERROR
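
Note that the expected count is unaffected by this edit: randomkeys writes numOfVolumes x numOfBuckets x numOfKeys = 5 x 5 x 5 = 125 keys regardless of thread count; only the parallelism drops from 10 to 1.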

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 3083660..6427dae 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -176,14 +176,14 @@ public class TestMiniOzoneCluster {
             TestUtils.randomDatanodeDetails(), ozoneConf)
     ) {
       HashSet<Integer> ports = new HashSet<Integer>();
-      assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
-      assertTrue(ports.add(sm2.getContainer().getContainerServerPort()));
-      assertTrue(ports.add(sm3.getContainer().getContainerServerPort()));
+      assertTrue(ports.add(sm1.getContainer().getReadChannel().getIPCPort()));
+      assertTrue(ports.add(sm2.getContainer().getReadChannel().getIPCPort()));
+      assertTrue(ports.add(sm3.getContainer().getReadChannel().getIPCPort()));
 
       // Assert that ratis is also on a different port.
-      assertTrue(ports.add(sm1.getContainer().getRatisContainerServerPort()));
-      assertTrue(ports.add(sm2.getContainer().getRatisContainerServerPort()));
-      assertTrue(ports.add(sm3.getContainer().getRatisContainerServerPort()));
+      assertTrue(ports.add(sm1.getContainer().getWriteChannel().getIPCPort()));
+      assertTrue(ports.add(sm2.getContainer().getWriteChannel().getIPCPort()));
+      assertTrue(ports.add(sm3.getContainer().getWriteChannel().getIPCPort()));
 
 
     }
@@ -199,9 +199,9 @@ public class TestMiniOzoneCluster {
             TestUtils.randomDatanodeDetails(), ozoneConf)
     ) {
       HashSet<Integer> ports = new HashSet<Integer>();
-      assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
-      assertFalse(ports.add(sm2.getContainer().getContainerServerPort()));
-      assertFalse(ports.add(sm3.getContainer().getContainerServerPort()));
+      assertTrue(ports.add(sm1.getContainer().getReadChannel().getIPCPort()));
+      assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort()));
+      assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort()));
       assertEquals(ports.iterator().next().intValue(),
           conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
               OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT));
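
Here getContainerServerPort() and getRatisContainerServerPort() give way to channel-oriented accessors, with both channels exposing their port through getIPCPort(). The renamed calls as a sketch, assuming a DatanodeStateMachine sm as in the test:

    import org.junit.Assert;

    // Standalone (read) channel: the gRPC container server port.
    int readPort = sm.getContainer().getReadChannel().getIPCPort();
    // Ratis (write) channel: the replicated-write server port.
    int writePort = sm.getContainer().getWriteChannel().getIPCPort();
    // Each channel binds its own port, as the HashSet assertions verify.
    Assert.assertNotEquals(readPort, writePort);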

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 405ce8e..abd60a1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -134,7 +134,7 @@ public class TestCloseContainerHandlingByClient {
         .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
         .build();
 
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
     key.write(data);
     key.flush();
     key.close();
@@ -162,11 +162,12 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
-        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
+        .setBucketName(bucketName)
+        .setType(HddsProtos.ReplicationType.RATIS)
         .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
         .build();
 
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
     key.close();
     // read the key from OM again and match the length.The length will still
     // be the equal to the original data size.
@@ -199,7 +200,7 @@ public class TestCloseContainerHandlingByClient {
         .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
         .build();
 
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
     // write 1 more block worth of data. It will fail and new block will be
     // allocated
     key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
@@ -249,7 +250,7 @@ public class TestCloseContainerHandlingByClient {
         .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName)
         .build();
 
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
 
     key.close();
     // read the key from OM again and match the length.The length will still
@@ -291,7 +292,7 @@ public class TestCloseContainerHandlingByClient {
         .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
         .build();
 
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
     // write 3 more chunks worth of data. It will fail and new block will be
     // allocated. This write completes 4 blocks worth of data written to key
     data = Arrays.copyOfRange(writtenData, 3 * blockSize + chunkSize, keyLen);
@@ -321,7 +322,7 @@ public class TestCloseContainerHandlingByClient {
   }
 
   private void waitForContainerClose(String keyName,
-      OzoneOutputStream outputStream, HddsProtos.ReplicationType type)
+      OzoneOutputStream outputStream)
       throws Exception {
     ChunkGroupOutputStream groupOutputStream =
         (ChunkGroupOutputStream) outputStream.getOutputStream();
@@ -332,11 +333,10 @@ public class TestCloseContainerHandlingByClient {
       containerIdList.add(info.getContainerID());
     }
     Assert.assertTrue(!containerIdList.isEmpty());
-    waitForContainerClose(type, containerIdList.toArray(new Long[0]));
+    waitForContainerClose(containerIdList.toArray(new Long[0]));
   }
 
-  private void waitForContainerClose(HddsProtos.ReplicationType type,
-      Long... containerIdList)
+  private void waitForContainerClose(Long... containerIdList)
       throws ContainerNotFoundException, PipelineNotFoundException,
       TimeoutException, InterruptedException {
     List<Pipeline> pipelineList = new ArrayList<>();
@@ -358,7 +358,7 @@ public class TestCloseContainerHandlingByClient {
         // send the order to close the container
         cluster.getStorageContainerManager().getScmNodeManager()
             .addDatanodeCommand(details.getUuid(),
-                new CloseContainerCommand(containerID, type, pipeline.getId()));
+                new CloseContainerCommand(containerID, pipeline.getId()));
       }
     }
     int index = 0;
@@ -413,7 +413,7 @@ public class TestCloseContainerHandlingByClient {
             .getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(1, datanodes.size());
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
     dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
     data = dataString.getBytes(UTF_8);
@@ -459,7 +459,7 @@ public class TestCloseContainerHandlingByClient {
         .build();
 
     Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
-    waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
+    waitForContainerClose(keyName, key);
     // Again Write the Data. This will throw an exception which will be handled
     // and new blocks will be allocated
     key.write(data);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 30e3536..695b3f1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -22,9 +22,11 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -32,7 +34,6 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
@@ -63,8 +64,9 @@ public class TestCloseContainerByPipeline {
   @BeforeClass
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, "1");
     cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3).build();
+        .setNumDatanodes(9).build();
     cluster.waitForClusterToBeReady();
     //the easiest way to create an open container is creating a key
     client = OzoneClientFactory.getClient(conf);
@@ -86,7 +88,7 @@ public class TestCloseContainerByPipeline {
   @Test
   public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
     OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey("testCloseContainer", 1024, ReplicationType.STAND_ALONE,
+        .createKey("standalone", 1024, ReplicationType.RATIS,
             ReplicationFactor.ONE);
     key.write("standalone".getBytes());
     key.close();
@@ -94,10 +96,9 @@ public class TestCloseContainerByPipeline {
     //get the name of a valid container
     OmKeyArgs keyArgs =
         new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
-            .setType(HddsProtos.ReplicationType.STAND_ALONE)
+            .setType(HddsProtos.ReplicationType.RATIS)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
-            .setKeyName("testCloseContainer").build();
-
+            .setKeyName("standalone").build();
     OmKeyLocationInfo omKeyLocationInfo =
         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
@@ -127,8 +128,7 @@ public class TestCloseContainerByPipeline {
     //send the order to close the container
     cluster.getStorageContainerManager().getScmNodeManager()
         .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID,
-                HddsProtos.ReplicationType.STAND_ALONE, pipeline.getId()));
+            new CloseContainerCommand(containerID, pipeline.getId()));
     GenericTestUtils
        .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
             500, 5 * 1000);
@@ -142,7 +142,7 @@ public class TestCloseContainerByPipeline {
       throws IOException, TimeoutException, InterruptedException {
 
     OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey("standalone", 1024, ReplicationType.STAND_ALONE,
+        .createKey("standalone", 1024, ReplicationType.RATIS,
             ReplicationFactor.ONE);
     key.write("standalone".getBytes());
     key.close();
@@ -150,7 +150,7 @@ public class TestCloseContainerByPipeline {
     //get the name of a valid container
     OmKeyArgs keyArgs =
         new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
-            .setType(HddsProtos.ReplicationType.STAND_ALONE)
+            .setType(HddsProtos.ReplicationType.RATIS)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("standalone").build();
 
@@ -170,30 +170,20 @@ public class TestCloseContainerByPipeline {
     Assert
         .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
 
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(OzoneContainer.LOG);
-    //send the order to close the container
+    // Send the order to close the container, give random pipeline id so that
+    // the container will not be closed via RATIS
     cluster.getStorageContainerManager().getScmNodeManager()
         .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID,
-                HddsProtos.ReplicationType.STAND_ALONE, pipeline.getId()));
-
-    // The log will appear after the state changed to closed in standalone,
-    // wait for the log to ensure the operation has been done.
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
-        "submitting CloseContainer request over STAND_ALONE server for"
-            + " container " + containerID), 500, 5 * 1000);
+            new CloseContainerCommand(containerID, PipelineID.randomId()));
 
     //double check if it's really closed (waitFor also throws an exception)
-    Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
-    Assert.assertTrue(logCapturer.getOutput().contains(
-        "submitting CloseContainer request over STAND_ALONE server for"
-            + " container " + containerID));
-    // Make sure it was really closed via StandAlone not Ratis server
-    Assert.assertFalse((logCapturer.getOutput().contains(
-        "submitting CloseContainer request over RATIS server for container "
-            + containerID)));
-    logCapturer.stopCapturing();
+    // TODO: change the below line after implementing QUASI_CLOSED to CLOSED
+    // logic. The container will be QUASI closed as of now
+    GenericTestUtils
+        .waitFor(() -> isContainerQuasiClosed(
+            cluster, containerID, datanodeDetails), 500, 5 * 1000);
+    Assert.assertTrue(
+        isContainerQuasiClosed(cluster, containerID, datanodeDetails));
   }
 
   @Test
@@ -224,18 +214,14 @@ public class TestCloseContainerByPipeline {
     List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(3, datanodes.size());
 
-    GenericTestUtils.LogCapturer logCapturer =
-        GenericTestUtils.LogCapturer.captureLogs(OzoneContainer.LOG);
-
     for (DatanodeDetails details : datanodes) {
       Assert.assertFalse(isContainerClosed(cluster, containerID, details));
       //send the order to close the container
       cluster.getStorageContainerManager().getScmNodeManager()
           .addDatanodeCommand(details.getUuid(),
-              new CloseContainerCommand(containerID,
-                  HddsProtos.ReplicationType.RATIS, pipeline.getId()));
+              new CloseContainerCommand(containerID, pipeline.getId()));
     }
-
+    // Make sure that it is CLOSED
     for (DatanodeDetails datanodeDetails : datanodes) {
       GenericTestUtils.waitFor(
           () -> isContainerClosed(cluster, containerID, datanodeDetails), 500,
@@ -244,14 +230,6 @@ public class TestCloseContainerByPipeline {
       Assert.assertTrue(isContainerClosed(cluster,
           containerID, datanodeDetails));
     }
-    // Make sure it was really closed via Ratis not STAND_ALONE server
-    Assert.assertFalse(logCapturer.getOutput().contains(
-        "submitting CloseContainer request over STAND_ALONE "
-            + "server for container " + containerID));
-    Assert.assertTrue((logCapturer.getOutput().contains(
-        "submitting CloseContainer request over RATIS server for container "
-            + containerID)));
-    logCapturer.stopCapturing();
   }
 
   private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID,
@@ -267,4 +245,18 @@ public class TestCloseContainerByPipeline {
     }
     return false;
   }
+
+  private Boolean isContainerQuasiClosed(MiniOzoneCluster miniCluster,
+      long containerID, DatanodeDetails datanode) {
+    ContainerData containerData;
+    for (HddsDatanodeService datanodeService : miniCluster.getHddsDatanodes()) {
+      if (datanode.equals(datanodeService.getDatanodeDetails())) {
+        containerData =
+            datanodeService.getDatanodeStateMachine().getContainer()
+                .getContainerSet().getContainer(containerID).getContainerData();
+        return containerData.isQuasiClosed();
+      }
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 9cf51d1..588a301 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -95,8 +95,7 @@ public class TestCloseContainerHandler {
     //send the order to close the container
     cluster.getStorageContainerManager().getScmNodeManager()
         .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerId.getId(),
-                HddsProtos.ReplicationType.STAND_ALONE, pipeline.getId()));
+            new CloseContainerCommand(containerId.getId(), pipeline.getId()));
 
     GenericTestUtils.waitFor(() ->
             isContainerClosed(cluster, containerId.getId()),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index d4f7ae5..527ab45 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 
+import com.google.common.collect.Maps;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -36,12 +37,15 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
 import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -49,6 +53,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.File;
+import java.util.Map;
 import java.util.UUID;
 
 /**
@@ -57,9 +62,9 @@ import java.util.UUID;
 public class TestContainerMetrics {
 
   private GrpcReplicationService createReplicationService(
-      ContainerSet containerSet) {
+      ContainerController controller) {
     return new GrpcReplicationService(
-        new OnDemandContainerReplicationSource(containerSet));
+        new OnDemandContainerReplicationSource(controller));
   }
 
   @Test
@@ -85,12 +90,21 @@ public class TestContainerMetrics {
       VolumeSet volumeSet = new VolumeSet(
           datanodeDetails.getUuidString(), conf);
       ContainerSet containerSet = new ContainerSet();
+      ContainerMetrics metrics = ContainerMetrics.create(conf);
+      Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
+      for (ContainerProtos.ContainerType containerType :
+          ContainerProtos.ContainerType.values()) {
+        handlers.put(containerType,
+            Handler.getHandlerForContainerType(
+                containerType, conf, null, containerSet, volumeSet, metrics));
+      }
       HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
-          volumeSet, null);
+          volumeSet, handlers, null, metrics);
       dispatcher.setScmId(UUID.randomUUID().toString());
 
       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher,
-          createReplicationService(containerSet));
+          createReplicationService(new ContainerController(
+              containerSet, handlers)));
       client = new XceiverClientGrpc(pipeline, conf);
 
       server.start();
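
This file, TestContainerServer, and BenchMarkDatanodeDispatcher all pick up the same construction pattern: HddsDispatcher now takes an explicit per-type handler map plus metrics, and the replication service is fed a ContainerController instead of a raw ContainerSet. The pattern in one sketch (conf, containerSet, and volumeSet as in the test):

    import java.util.Map;
    import com.google.common.collect.Maps;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
    import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
    import org.apache.hadoop.ozone.container.common.interfaces.Handler;
    import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;

    ContainerMetrics metrics = ContainerMetrics.create(conf);
    // One handler per container type, all sharing the same container set,
    // volume set, and metrics; the state context is null in these tests.
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType type
        : ContainerProtos.ContainerType.values()) {
      handlers.put(type, Handler.getHandlerForContainerType(
          type, conf, null, containerSet, volumeSet, metrics));
    }
    // The dispatcher and the replication-facing controller both consume the map.
    HddsDispatcher dispatcher = new HddsDispatcher(
        conf, containerSet, volumeSet, handlers, null, metrics);
    ContainerController controller =
        new ContainerController(containerSet, handlers);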

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 9a3fa1b..3e98594 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -18,11 +18,14 @@
 
 package org.apache.hadoop.ozone.container.server;
 
+import com.google.common.collect.Maps;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
 import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -57,6 +60,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 
 import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
 import static org.apache.ratis.rpc.SupportedRpcType.NETTY;
@@ -71,15 +75,17 @@ public class TestContainerServer {
       = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator;
 
   private GrpcReplicationService createReplicationService(
-      ContainerSet containerSet) {
+      ContainerController containerController) {
     return new GrpcReplicationService(
-        new OnDemandContainerReplicationSource(containerSet));
+        new OnDemandContainerReplicationSource(containerController));
   }
 
   @Test
   public void testClientServer() throws Exception {
     DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
     ContainerSet containerSet = new ContainerSet();
+    ContainerController controller = new ContainerController(
+        containerSet, null);
     runTestClientServer(1, (pipeline, conf) -> conf
             .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
                 pipeline.getFirstNode()
@@ -87,7 +93,7 @@ public class TestContainerServer {
         XceiverClientGrpc::new,
         (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf,
             new TestContainerDispatcher(),
-            createReplicationService(containerSet)), (dn, p) -> {
+            createReplicationService(controller)), (dn, p) -> {
         });
   }
 
@@ -185,12 +191,22 @@ public class TestContainerServer {
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
       ContainerSet containerSet = new ContainerSet();
+      VolumeSet volumeSet = mock(VolumeSet.class);
+      ContainerMetrics metrics = ContainerMetrics.create(conf);
+      Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
+      for (ContainerProtos.ContainerType containerType :
+          ContainerProtos.ContainerType.values()) {
+        handlers.put(containerType,
+            Handler.getHandlerForContainerType(
+                containerType, conf, null, containerSet, volumeSet, metrics));
+      }
       HddsDispatcher dispatcher = new HddsDispatcher(
-          conf, mock(ContainerSet.class), mock(VolumeSet.class), null);
+          conf, containerSet, volumeSet, handlers, null, metrics);
       dispatcher.init();
       DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher,
-          createReplicationService(containerSet));
+          createReplicationService(
+              new ContainerController(containerSet, null)));
       client = new XceiverClientGrpc(pipeline, conf);
 
       server.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index f9e5753..01b51fa 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.ozone.genesis;
 
+import com.google.common.collect.Maps;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine.DatanodeStates;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -44,6 +47,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -100,9 +104,18 @@ public class BenchMarkDatanodeDispatcher {
 
     ContainerSet containerSet = new ContainerSet();
     VolumeSet volumeSet = new VolumeSet(datanodeUuid, conf);
-
-    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet,
-        new StateContext(conf, DatanodeStates.RUNNING, null));
+    StateContext context = new StateContext(
+        conf, DatanodeStates.RUNNING, null);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
+    for (ContainerProtos.ContainerType containerType :
+        ContainerProtos.ContainerType.values()) {
+      handlers.put(containerType,
+          Handler.getHandlerForContainerType(
+              containerType, conf, context, containerSet, volumeSet, metrics));
+    }
+    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers,
+        context, metrics);
     dispatcher.init();
 
     containerCount = new AtomicInteger();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d96400/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
index a91e190..4a0cb61 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.freon;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.container.common.transport
     .server.XceiverServerSpi;
@@ -128,7 +127,7 @@ public class TestFreonWithDatanodeFastRestart {
   private StateMachine getStateMachine() throws Exception {
     XceiverServerSpi server =
         cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().
-            getContainer().getServer(HddsProtos.ReplicationType.RATIS);
+            getContainer().getWriteChannel();
     RaftServerProxy proxy =
         (RaftServerProxy)(((XceiverServerRatis)server).getServer());
     RaftGroupId groupId = proxy.getGroupIds().iterator().next();
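
With getServer(HddsProtos.ReplicationType.RATIS) gone (hence the dropped HddsProtos import), the Ratis server is now reached through the write channel and then downcast. Restated as a sketch under the same cluster fixture:

    // The write channel is the Ratis transport, so the downcast holds.
    XceiverServerSpi server = cluster.getHddsDatanodes().get(0)
        .getDatanodeStateMachine().getContainer().getWriteChannel();
    RaftServerProxy proxy =
        (RaftServerProxy) ((XceiverServerRatis) server).getServer();
    RaftGroupId groupId = proxy.getGroupIds().iterator().next();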

