This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new b20ceebae1 HDDS-10071. Merge TestContainerStateMachineIdempotency into
TestContainerOperations (#6813)
b20ceebae1 is described below
commit b20ceebae17a14b663fa18e09582d549c5c70f41
Author: SaketaChalamchala <[email protected]>
AuthorDate: Sun Jun 16 22:40:19 2024 -0700
HDDS-10071. Merge TestContainerStateMachineIdempotency into
TestContainerOperations (#6813)
---
.../dev-support/findbugsExcludeFile.xml | 2 +-
.../hadoop/ozone/TestContainerOperations.java | 56 ++++++++++
.../TestContainerStateMachineIdempotency.java | 116 ---------------------
3 files changed, 57 insertions(+), 117 deletions(-)
diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
index 632e9fc2f4..5ca3373012 100644
--- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
@@ -41,7 +41,7 @@
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<Match>
- <Class name="org.apache.hadoop.ozone.TestContainerStateMachineIdempotency"/>
+ <Class name="org.apache.hadoop.ozone.TestContainerOperations"/>
<Bug pattern="RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
</Match>
<Match>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index 5f8f34a2e3..cbd1829ef0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -17,8 +17,14 @@
*/
package org.apache.hadoop.ozone;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
@@ -27,6 +33,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
@@ -34,8 +41,13 @@ import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.REPLICATION;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -56,6 +68,9 @@ public class TestContainerOperations {
private static ScmClient storageClient;
private static MiniOzoneCluster cluster;
private static OzoneConfiguration ozoneConf;
+ private static StorageContainerLocationProtocolClientSideTranslatorPB
+ storageContainerLocationClient;
+ private static XceiverClientManager xceiverClientManager;
@BeforeAll
public static void setup() throws Exception {
@@ -65,6 +80,9 @@ public class TestContainerOperations {
cluster =
MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build();
storageClient = new ContainerOperationClient(ozoneConf);
cluster.waitForClusterToBeReady();
+ storageContainerLocationClient =
+ cluster.getStorageContainerLocationClient();
+ xceiverClientManager = new XceiverClientManager(ozoneConf);
}
@AfterAll
@@ -72,6 +90,44 @@ public class TestContainerOperations {
if (cluster != null) {
cluster.shutdown();
}
+ IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
+ }
+
+ @Test
+ void testContainerStateMachineIdempotency() throws Exception {
+ ContainerWithPipeline container = storageContainerLocationClient
+ .allocateContainer(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
+ long containerID = container.getContainerInfo().getContainerID();
+ Pipeline pipeline = container.getPipeline();
+ XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
+ //create the container
+ ContainerProtocolCalls.createContainer(client, containerID, null);
+ // call create Container again
+ BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+ byte[] data =
+ RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
+ ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+ ContainerTestHelper
+ .getWriteChunkRequest(container.getPipeline(), blockID,
+ data.length);
+ client.sendCommand(writeChunkRequest);
+
+ //Make the write chunk request again without requesting for overWrite
+ client.sendCommand(writeChunkRequest);
+ // Now, explicitly make a putKey request for the block.
+ ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+ ContainerTestHelper
+ .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
+ client.sendCommand(putKeyRequest).getPutBlock();
+ // send the putBlock again
+ client.sendCommand(putKeyRequest);
+
+ // close container call
+ ContainerProtocolCalls.closeContainer(client, containerID, null);
+ ContainerProtocolCalls.closeContainer(client, containerID, null);
+
+ xceiverClientManager.releaseClient(client, false);
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
deleted file mode 100644
index c727ecd0a9..0000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.PlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
- SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.protocolPB.
- StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-import java.util.concurrent.TimeUnit;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Tests the idempotent operations in ContainerStateMachine.
- */
-@Timeout(value = 300, unit = TimeUnit.SECONDS)
-public class TestContainerStateMachineIdempotency {
- private static MiniOzoneCluster cluster;
- private static OzoneConfiguration ozoneConfig;
- private static StorageContainerLocationProtocolClientSideTranslatorPB
- storageContainerLocationClient;
- private static XceiverClientManager xceiverClientManager;
-
- @BeforeAll
- public static void init() throws Exception {
- ozoneConfig = new OzoneConfiguration();
- ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
- SCMContainerPlacementCapacity.class, PlacementPolicy.class);
- cluster =
- MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(3).build();
- cluster.waitForClusterToBeReady();
- storageContainerLocationClient =
- cluster.getStorageContainerLocationClient();
- xceiverClientManager = new XceiverClientManager(ozoneConfig);
- }
-
- @AfterAll
- public static void shutdown() {
- if (cluster != null) {
- cluster.shutdown();
- }
- IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
- }
-
- @Test
- void testContainerStateMachineIdempotency() throws Exception {
- ContainerWithPipeline container = storageContainerLocationClient
- .allocateContainer(HddsProtos.ReplicationType.RATIS,
- HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
- long containerID = container.getContainerInfo().getContainerID();
- Pipeline pipeline = container.getPipeline();
- XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
- //create the container
- ContainerProtocolCalls.createContainer(client, containerID, null);
- // call create Container again
- BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
- byte[] data =
- RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
- ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
- ContainerTestHelper
- .getWriteChunkRequest(container.getPipeline(), blockID,
- data.length);
- client.sendCommand(writeChunkRequest);
-
- //Make the write chunk request again without requesting for overWrite
- client.sendCommand(writeChunkRequest);
- // Now, explicitly make a putKey request for the block.
- ContainerProtos.ContainerCommandRequestProto putKeyRequest =
- ContainerTestHelper
- .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
- client.sendCommand(putKeyRequest).getPutBlock();
- // send the putBlock again
- client.sendCommand(putKeyRequest);
-
- // close container call
- ContainerProtocolCalls.closeContainer(client, containerID, null);
- ContainerProtocolCalls.closeContainer(client, containerID, null);
-
- xceiverClientManager.releaseClient(client, false);
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]