adoroszlai commented on code in PR #7716:
URL: https://github.com/apache/ozone/pull/7716#discussion_r1922454185


##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java:
##########
@@ -41,154 +45,185 @@
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.TestHelper;
-import org.apache.ozone.test.tag.Flaky;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
+import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.time.Duration;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Stream;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.PutBlock;
-import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.WriteChunk;
-import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY;
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 
 /**
  * Tests BlockDataStreamOutput class.
  */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
 @Timeout(300)
 public class TestBlockDataStreamOutput {
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-  private static OzoneClient client;
-  private static ObjectStore objectStore;
-  private static int chunkSize;
-  private static int flushSize;
-  private static int maxFlushSize;
-  private static int blockSize;
-  private static String volumeName;
-  private static String bucketName;
-  private static String keyString;
+  private MiniOzoneCluster cluster;
+  private static final int CHUNK_SIZE = 100;
+  private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+  private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+  private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+  private static final String VOLUME_NAME = "testblockoutputstream";
+  private static final String BUCKET_NAME = VOLUME_NAME;
+  private static String keyString = UUID.randomUUID().toString();;
   private static final DatanodeVersion DN_OLD_VERSION = 
DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE;
 
-  @BeforeAll
-  public static void init() throws Exception {
-    chunkSize = 100;
-    flushSize = 2 * chunkSize;
-    maxFlushSize = 2 * flushSize;
-    blockSize = 2 * maxFlushSize;
-
+  static MiniOzoneCluster createCluster() throws IOException,
+      InterruptedException, TimeoutException {
+    OzoneConfiguration conf = new OzoneConfiguration();
     OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(false);
+    clientConfig.setEnablePutblockPiggybacking(true);
     conf.setFromObject(clientConfig);
 
-    conf.setBoolean(OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, true);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS);
     conf.setQuietMode(false);
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
-        StorageUnit.MB);
+    conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 3);
+
+    conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
+    conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
+
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
+
+    RatisClientConfig.RaftConfig raftClientConfig =
+        conf.getObject(RatisClientConfig.RaftConfig.class);
+    raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
+    raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(5));
+    conf.setFromObject(raftClientConfig);
+
+    RatisClientConfig ratisClientConfig =
+        conf.getObject(RatisClientConfig.class);
+    ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30));
+    ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30));
+    conf.setFromObject(ratisClientConfig);
 
     ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
-        .setBlockSize(blockSize)
-        .setChunkSize(chunkSize)
-        .setStreamBufferFlushSize(flushSize)
-        .setStreamBufferMaxSize(maxFlushSize)
-        .setDataStreamBufferFlushSize(maxFlushSize)
-        .setDataStreamMinPacketSize(chunkSize)
-        .setDataStreamWindowSize(5 * chunkSize)
+        .setBlockSize(BLOCK_SIZE)
+        .setChunkSize(CHUNK_SIZE)
+        .setStreamBufferFlushSize(FLUSH_SIZE)
+        .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
+        .setDataStreamBufferFlushSize(MAX_FLUSH_SIZE)
+        .setDataStreamMinPacketSize(CHUNK_SIZE)
+        .setDataStreamWindowSize(5 * CHUNK_SIZE)
         .applyTo(conf);
 
-    cluster = MiniOzoneCluster.newBuilder(conf)
+    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5)
         .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
             .setCurrentVersion(DN_OLD_VERSION)
             .build())
         .build();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE,
+        180000);
     cluster.waitForClusterToBeReady();
-    //the easiest way to create an open container is creating a key
-    client = OzoneClientFactory.getRpcClient(conf);
-    objectStore = client.getObjectStore();
-    keyString = UUID.randomUUID().toString();
-    volumeName = "testblockdatastreamoutput";
-    bucketName = volumeName;
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
+
+    try (OzoneClient client = cluster.newClient()) {
+      ObjectStore objectStore = client.getObjectStore();
+      objectStore.createVolume(VOLUME_NAME);
+      objectStore.getVolume(VOLUME_NAME).createBucket(BUCKET_NAME);
+    }
+
+    return cluster;
   }
 
-  static String getKeyName() {
-    return UUID.randomUUID().toString();
+  private static Stream<Arguments> clientParameters() {
+    return Stream.of(
+        Arguments.of(true),
+        Arguments.of(false)
+    );
   }
 
-  @AfterAll
-  public static void shutdown() {
-    IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
+  private static Stream<Arguments> dataLengthParameters() {
+    return Stream.of(
+        Arguments.of(CHUNK_SIZE / 2),
+        Arguments.of(CHUNK_SIZE),
+        Arguments.of(CHUNK_SIZE + 50),
+        Arguments.of(BLOCK_SIZE + 50)
+    );
   }
 
-  @Test
-  public void testHalfChunkWrite() throws Exception {
-    testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize / 2);
+  static OzoneClientConfig newClientConfig(ConfigurationSource source,
+                                           boolean flushDelay) {
+    OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(flushDelay);
+    return clientConfig;
   }
 
-  @Test
-  public void testSingleChunkWrite() throws Exception {
-    testWrite(chunkSize);
-    testWriteWithFailure(chunkSize);
+  static OzoneClient newClient(OzoneConfiguration conf,
+                               OzoneClientConfig config) throws IOException {
+    OzoneConfiguration copy = new OzoneConfiguration(conf);
+    copy.setFromObject(config);
+    return OzoneClientFactory.getRpcClient(copy);
   }
 
-  @Test
-  public void testMultiChunkWrite() throws Exception {
-    testWrite(chunkSize + 50);
-    testWriteWithFailure(chunkSize + 50);
+  @BeforeAll
+  public void init() throws Exception {
+    cluster = createCluster();
+  }
+
+  static String getKeyName() {
+    return UUID.randomUUID().toString();
   }
 
-  @Test
-  @Flaky("HDDS-12027")

Review Comment:
   `@Flaky` simply allows the test to be re-run a few times if it fails.
   
   1. Strict: only allow that single test case to be repeated by:
     - remove the parameter value that corresponds to the old flaky test case
      - restore the old test case
      - replace its implementation to invoke the parameterized method with that 
value
   2. Lenient: mark the parameterized method as `@Flaky`, test cases that 
always pass won't be re-run.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to