shashankhs11 commented on code in PR #20285:
URL: https://github.com/apache/kafka/pull/20285#discussion_r2409112862


##########
streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AtLeastOnceDeliveryMessageLossIntegrationTest.java:
##########
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.integration;
+
+import org.apache.kafka.clients.admin.Admin;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.internals.Sender;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.LogCaptureAppender;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsBuilder;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster;
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils;
+import org.apache.kafka.streams.kstream.KStream;
+import org.apache.kafka.test.TestUtils;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
+import org.junit.jupiter.api.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+
+import static org.apache.kafka.common.utils.Utils.mkEntry;
+import static org.apache.kafka.common.utils.Utils.mkMap;
+import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.DEFAULT_TIMEOUT;
+import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.cleanStateBeforeTest;
+import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.purgeLocalStreamsState;
+import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.startApplicationAndWaitUntilRunning;
+import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived;
+import static org.apache.kafka.streams.utils.TestUtils.safeUniqueTestName;
+import static org.apache.kafka.test.TestUtils.consumerConfig;
+import static org.apache.kafka.test.TestUtils.producerConfig;
+import static org.apache.kafka.test.TestUtils.waitForCondition;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+
+@Timeout(600)
+@Tag("integration")
+public class AtLeastOnceDeliveryMessageLossIntegrationTest {
+    private static final Logger log = LoggerFactory.getLogger(
+        AtLeastOnceDeliveryMessageLossIntegrationTest.class);
+    
+    private static final int NUM_BROKERS = 1;
+    private static final int LARGE_RECORD_COUNT = 50000;
+    private static final int SMALL_RECORD_COUNT = 40000;
+    
+    public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(NUM_BROKERS);
+    
+    @BeforeAll
+    public static void startCluster() throws IOException {
+        CLUSTER.start();
+    }
+    
+    @AfterAll
+    public static void closeCluster() {
+        CLUSTER.stop();
+    }
+    
+    private String applicationId;
+    private String inputTopic;
+    private String outputTopic;
+    private Properties streamsConfiguration;
+    private KafkaStreams kafkaStreams;
+    
+    @BeforeEach
+    public void setUp(final TestInfo testInfo) throws Exception {
+        final String testId = safeUniqueTestName(testInfo);
+        applicationId = "app-" + testId;
+        inputTopic = "input-" + testId;
+        outputTopic = "output-" + testId;
+        
+        cleanStateBeforeTest(CLUSTER, inputTopic, outputTopic);
+        CLUSTER.createTopics(inputTopic, outputTopic);
+        
+        setupStreamsConfiguration();
+    }
+    
+    @AfterEach
+    public void cleanUp() throws Exception {
+        if (kafkaStreams != null) {
+            kafkaStreams.close();
+        }
+        if (streamsConfiguration != null) {
+            purgeLocalStreamsState(streamsConfiguration);
+        }
+    }
+
+    // Failing test: demonstrates the at-least-once message-loss bug
+    @Test
+    public void shouldNotCommitOffsetsAndNotProduceOutputRecordsWhenProducerFailsWithMessageTooLarge() throws Exception {
+        
+        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(Sender.class)) {
+            produceInputData(LARGE_RECORD_COUNT);
+
+            kafkaStreams = createStreamsApplication();
+            startApplicationAndWaitUntilRunning(Collections.singletonList(kafkaStreams), Duration.ofMillis(DEFAULT_TIMEOUT));
+
+            waitForProcessingAndCommit();
+
+            // With this bug the observed sequence is:
+            //   1. offsets are committed, then
+            //   2. no messages are produced to the output topic, while
+            //   3. the producer retries repeatedly with MESSAGE_TOO_LARGE errors
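+            // Correct at-least-once behavior would be the reverse: the producer
+            // is flushed before offsets are committed, so input records whose
+            // output failed to be produced are reprocessed instead of silently lost.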
+
+            assertTrue(appender.getMessages().stream()
+                    .anyMatch(msg -> msg.contains("MESSAGE_TOO_LARGE") && msg.contains("splitting and retrying")),
+                "Should log MESSAGE_TOO_LARGE and splitting retry messages");
+
+            final int outputRecordCount = verifyOutputRecords(0); // should not produce records
+            final boolean offsetsCommitted = verifyConsumerOffsetsCommitted(0); // should not commit offsets unless records are produced
+
+            assertEquals(0, outputRecordCount, "Output topic should not have any records");
+            assertTrue(offsetsCommitted, "Consumer offsets should not be committed");
+        }
+    }
+    
+    @Test
+    public void shouldCommitOffsetsAndProduceMessagesNormallyForSmallerRecordCount() throws Exception {
+        produceInputData(SMALL_RECORD_COUNT);
+        
+        try (final KafkaStreams kafkaStreams = createStreamsApplication()) {
+            startApplicationAndWaitUntilRunning(Collections.singletonList(kafkaStreams), Duration.ofMillis(DEFAULT_TIMEOUT));
+            
+            waitForProcessingAndCommit();
+
+            // normal behavior
+            final int outputRecordCount = verifyOutputRecords(SMALL_RECORD_COUNT); // should produce records
+            final boolean offsetsCommitted = verifyConsumerOffsetsCommitted(SMALL_RECORD_COUNT); // should commit offsets
+
+            assertEquals(SMALL_RECORD_COUNT, outputRecordCount, "Output topic should have " + SMALL_RECORD_COUNT + " records");
+            assertTrue(offsetsCommitted, "Consumer offsets should be committed");
+        }
+    }
+    
+    private void setupStreamsConfiguration() {
+        streamsConfiguration = new Properties();
+        streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
+        streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
+        streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
+        streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
+        streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
+        
+        // AT_LEAST_ONCE processing guarantee
+        streamsConfiguration.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.AT_LEAST_ONCE);
+        streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 30000L);
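+        // Note: the 30s commit interval is far shorter than the 300s linger
+        // configured below, so a commit can fire while output records are
+        // still buffered, unsent, in the producer's accumulator.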
+        
+        // Producer configuration that can trigger MESSAGE_TOO_LARGE errors
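+        // The 32 MiB batch.size lets batches grow far beyond the broker's
+        // default message.max.bytes (~1 MB), so produce requests are rejected
+        // with MESSAGE_TOO_LARGE and the Sender keeps splitting and retrying.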
+        streamsConfiguration.put(ProducerConfig.LINGER_MS_CONFIG, 300000);
+        streamsConfiguration.put(ProducerConfig.BATCH_SIZE_CONFIG, 33554432);
+    }
+
+    private void produceInputData(final int recordCount) throws Exception {

Review Comment:
   Removed in 38f9b9f


