ivandika3 commented on code in PR #7716:
URL: https://github.com/apache/ozone/pull/7716#discussion_r1921841198


##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -105,90 +132,109 @@ public static void init() throws Exception {
         .setDataStreamWindowSize(5 * chunkSize)
         .applyTo(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5)
         .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
             .setCurrentVersion(DN_OLD_VERSION)
             .build())
         .build();
+
     cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getRpcClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockdatastreamoutput";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
+
+    try (OzoneClient client = cluster.newClient()) {
+      ObjectStore objectStore = client.getObjectStore();
+      objectStore.createVolume(volumeName);
+      objectStore.getVolume(volumeName).createBucket(bucketName);
+    }
+
+    return cluster;
   }
 
-  static String getKeyName() {
-    return UUID.randomUUID().toString();
+  private static Stream<Arguments> clientParameters() {
+    return Stream.of(
+        Arguments.of(true, true),
+        Arguments.of(true, false),
+        Arguments.of(false, true),
+        Arguments.of(false, false)
+    );
   }
 
-  @AfterAll
-  public static void shutdown() {
-    IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
+  private static Stream<Arguments> dataLengthParameters() {
+    return Stream.of(
+        Arguments.of(chunkSize / 2),
+        Arguments.of(chunkSize),
+        Arguments.of(chunkSize + 50),
+        Arguments.of(blockSize + 50)
+    );
   }
 
-  @Test
-  public void testHalfChunkWrite() throws Exception {
-    testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize / 2);
+  static OzoneClientConfig newClientConfig(ConfigurationSource source,
+                                           boolean flushDelay, boolean 
enablePiggybacking) {
+    OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(flushDelay);
+    clientConfig.setEnablePutblockPiggybacking(enablePiggybacking);
+    return clientConfig;
   }
 
-  @Test
-  public void testSingleChunkWrite() throws Exception {
-    testWrite(chunkSize);
-    testWriteWithFailure(chunkSize);
+  static OzoneClient newClient(OzoneConfiguration conf,
+                               OzoneClientConfig config) throws IOException {
+    OzoneConfiguration copy = new OzoneConfiguration(conf);
+    copy.setFromObject(config);
+    return OzoneClientFactory.getRpcClient(copy);
+  }
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */

Review Comment:
   Nit: Let's remove this since it was previously copied from `MiniDFSCluster` 
and is no longer valid.



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -41,59 +44,83 @@
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.TestHelper;
-import org.apache.ozone.test.tag.Flaky;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.time.Duration;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Stream;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.PutBlock;
-import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.WriteChunk;
-import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY;
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 
 /**
  * Tests BlockDataStreamOutput class.
  */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
 @Timeout(300)
 public class TestBlockDataStreamOutput {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static int chunkSize;
-  private static int flushSize;
-  private static int maxFlushSize;
-  private static int blockSize;
-  private static String volumeName;
-  private static String bucketName;
-  private static String keyString;
+  private MiniOzoneCluster cluster;
+  private static int chunkSize = 100;
+  private static int flushSize = 2 * chunkSize;
+  private static int maxFlushSize = 2 * flushSize;
+  private static int blockSize = 2 * maxFlushSize;
+  private static String volumeName = "testblockoutputstream";
+  private static String bucketName = volumeName;

Review Comment:
   Can make these constants `static final` with capital letters variable name.



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -105,90 +132,109 @@ public static void init() throws Exception {
         .setDataStreamWindowSize(5 * chunkSize)
         .applyTo(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5)
         .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
             .setCurrentVersion(DN_OLD_VERSION)
             .build())
         .build();
+
     cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getRpcClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockdatastreamoutput";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
+
+    try (OzoneClient client = cluster.newClient()) {
+      ObjectStore objectStore = client.getObjectStore();
+      objectStore.createVolume(volumeName);
+      objectStore.getVolume(volumeName).createBucket(bucketName);
+    }
+
+    return cluster;
   }
 
-  static String getKeyName() {
-    return UUID.randomUUID().toString();
+  private static Stream<Arguments> clientParameters() {
+    return Stream.of(
+        Arguments.of(true, true),
+        Arguments.of(true, false),
+        Arguments.of(false, true),
+        Arguments.of(false, false)
+    );
   }
 
-  @AfterAll
-  public static void shutdown() {
-    IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
+  private static Stream<Arguments> dataLengthParameters() {
+    return Stream.of(
+        Arguments.of(chunkSize / 2),
+        Arguments.of(chunkSize),
+        Arguments.of(chunkSize + 50),
+        Arguments.of(blockSize + 50)
+    );
   }
 
-  @Test
-  public void testHalfChunkWrite() throws Exception {
-    testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize / 2);
+  static OzoneClientConfig newClientConfig(ConfigurationSource source,
+                                           boolean flushDelay, boolean 
enablePiggybacking) {
+    OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(flushDelay);
+    clientConfig.setEnablePutblockPiggybacking(enablePiggybacking);
+    return clientConfig;
   }
 
-  @Test
-  public void testSingleChunkWrite() throws Exception {
-    testWrite(chunkSize);
-    testWriteWithFailure(chunkSize);
+  static OzoneClient newClient(OzoneConfiguration conf,
+                               OzoneClientConfig config) throws IOException {
+    OzoneConfiguration copy = new OzoneConfiguration(conf);
+    copy.setFromObject(config);
+    return OzoneClientFactory.getRpcClient(copy);
+  }
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  @BeforeAll
+  public void init() throws Exception {
+    cluster = createCluster();
   }
 
-  @Test
-  public void testMultiChunkWrite() throws Exception {
-    testWrite(chunkSize + 50);
-    testWriteWithFailure(chunkSize + 50);
+  static String getKeyName() {
+    return UUID.randomUUID().toString();
   }
 
-  @Test
-  @Flaky("HDDS-12027")
-  public void testMultiBlockWrite() throws Exception {
-    testWrite(blockSize + 50);
-    testWriteWithFailure(blockSize + 50);
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterAll
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 
-  static void testWrite(int dataLength) throws Exception {
-    XceiverClientMetrics metrics =
-        XceiverClientManager.getXceiverClientMetrics();
-    long pendingWriteChunkCount = 
metrics.getPendingContainerOpCountMetrics(WriteChunk);
-    long pendingPutBlockCount = 
metrics.getPendingContainerOpCountMetrics(PutBlock);
+  @ParameterizedTest
+  @MethodSource("dataLengthParameters")
+  public void testHalfChunkWrite(int dataLength) throws Exception {
+    OzoneClientConfig config = newClientConfig(cluster.getConf(), false, true);
+    try (OzoneClient client = newClient(cluster.getConf(), config)) {
+      testWrite(client, dataLength);
+      testWriteWithFailure(client, dataLength);
+    }
+  }

Review Comment:
   There is a discrepancy here. The method name is `testHalfChunkWrite`, but 
`dataLengthParameters` here have different lengths.
   
   Can use a more generalized name like `testStreamWrite`.
   
   In the future, we can separate the different data lengths into different 
methods since each case might have specialized assertions only for that case 
(e.g. in `BlockOutputStream`: `testWriteMoreThanChunkSize`, 
`testWriteExactlyFlushSize`, etc.)



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -105,90 +132,109 @@ public static void init() throws Exception {
         .setDataStreamWindowSize(5 * chunkSize)
         .applyTo(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5)
         .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
             .setCurrentVersion(DN_OLD_VERSION)
             .build())
         .build();
+
     cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getRpcClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockdatastreamoutput";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
+
+    try (OzoneClient client = cluster.newClient()) {
+      ObjectStore objectStore = client.getObjectStore();
+      objectStore.createVolume(volumeName);
+      objectStore.getVolume(volumeName).createBucket(bucketName);
+    }
+
+    return cluster;
   }
 
-  static String getKeyName() {
-    return UUID.randomUUID().toString();
+  private static Stream<Arguments> clientParameters() {
+    return Stream.of(
+        Arguments.of(true, true),
+        Arguments.of(true, false),
+        Arguments.of(false, true),
+        Arguments.of(false, false)
+    );
   }
 
-  @AfterAll
-  public static void shutdown() {
-    IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
+  private static Stream<Arguments> dataLengthParameters() {
+    return Stream.of(
+        Arguments.of(chunkSize / 2),
+        Arguments.of(chunkSize),
+        Arguments.of(chunkSize + 50),
+        Arguments.of(blockSize + 50)
+    );
   }
 
-  @Test
-  public void testHalfChunkWrite() throws Exception {
-    testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize / 2);
+  static OzoneClientConfig newClientConfig(ConfigurationSource source,
+                                           boolean flushDelay, boolean 
enablePiggybacking) {
+    OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(flushDelay);
+    clientConfig.setEnablePutblockPiggybacking(enablePiggybacking);
+    return clientConfig;

Review Comment:
   `setEnablePutblockPiggybacking` is not supported for Ozone streaming write 
pipeline V2 (i.e. `BlockDataStreamOutput`). Let's remove it first.
   
   Raised https://issues.apache.org/jira/browse/HDDS-12111 for supporting it.



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -105,90 +132,109 @@ public static void init() throws Exception {
         .setDataStreamWindowSize(5 * chunkSize)
         .applyTo(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5)
         .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
             .setCurrentVersion(DN_OLD_VERSION)
             .build())
         .build();
+
     cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getRpcClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockdatastreamoutput";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
+
+    try (OzoneClient client = cluster.newClient()) {
+      ObjectStore objectStore = client.getObjectStore();
+      objectStore.createVolume(volumeName);
+      objectStore.getVolume(volumeName).createBucket(bucketName);
+    }
+
+    return cluster;
   }
 
-  static String getKeyName() {
-    return UUID.randomUUID().toString();
+  private static Stream<Arguments> clientParameters() {
+    return Stream.of(
+        Arguments.of(true, true),
+        Arguments.of(true, false),
+        Arguments.of(false, true),
+        Arguments.of(false, false)
+    );
   }
 
-  @AfterAll
-  public static void shutdown() {
-    IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
+  private static Stream<Arguments> dataLengthParameters() {
+    return Stream.of(
+        Arguments.of(chunkSize / 2),
+        Arguments.of(chunkSize),
+        Arguments.of(chunkSize + 50),
+        Arguments.of(blockSize + 50)
+    );
   }
 
-  @Test
-  public void testHalfChunkWrite() throws Exception {
-    testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize / 2);
+  static OzoneClientConfig newClientConfig(ConfigurationSource source,
+                                           boolean flushDelay, boolean 
enablePiggybacking) {
+    OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(flushDelay);
+    clientConfig.setEnablePutblockPiggybacking(enablePiggybacking);
+    return clientConfig;
   }
 
-  @Test
-  public void testSingleChunkWrite() throws Exception {
-    testWrite(chunkSize);
-    testWriteWithFailure(chunkSize);
+  static OzoneClient newClient(OzoneConfiguration conf,
+                               OzoneClientConfig config) throws IOException {
+    OzoneConfiguration copy = new OzoneConfiguration(conf);
+    copy.setFromObject(config);
+    return OzoneClientFactory.getRpcClient(copy);
+  }
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  @BeforeAll
+  public void init() throws Exception {
+    cluster = createCluster();
   }
 
-  @Test
-  public void testMultiChunkWrite() throws Exception {
-    testWrite(chunkSize + 50);
-    testWriteWithFailure(chunkSize + 50);
+  static String getKeyName() {
+    return UUID.randomUUID().toString();
   }
 
-  @Test
-  @Flaky("HDDS-12027")
-  public void testMultiBlockWrite() throws Exception {
-    testWrite(blockSize + 50);
-    testWriteWithFailure(blockSize + 50);
+  /**
+   * Shutdown MiniDFSCluster.
+   */

Review Comment:
   Nit: Also remove this.



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -105,90 +132,109 @@ public static void init() throws Exception {
         .setDataStreamWindowSize(5 * chunkSize)
         .applyTo(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5)
         .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
             .setCurrentVersion(DN_OLD_VERSION)
             .build())
         .build();
+
     cluster.waitForClusterToBeReady();

Review Comment:
   Similarly to HDDS-9806, add
   
   ```java
   cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE,
           180000);
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to