chenyulin0719 commented on code in PR #18513:
URL: https://github.com/apache/kafka/pull/18513#discussion_r1948486105


##########
core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala:
##########
@@ -4003,6 +3939,72 @@ class PlaintextAdminIntegrationTest extends 
BaseAdminIntegrationTest {
       ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, 
Collections.emptyList(), null, null),
       topicConfigs.get(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG))
   }
+
+  class BackgroundConsumerSet(testGroupId: String, defaultConsumerConfig: 
Properties) {
+    private val consumerSet: 
scala.collection.mutable.Set[Consumer[Array[Byte], Array[Byte]]] = 
scala.collection.mutable.Set.empty
+    private val consumerThreads: scala.collection.mutable.Set[Thread] = 
scala.collection.mutable.Set.empty
+    private var startLatch: CountDownLatch = new CountDownLatch(0)
+    private var stopLatch: CountDownLatch = new CountDownLatch(0)
+    private var consumerThreadRunning = new AtomicBoolean(false)
+
+    defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, 
testGroupId)
+
+    def addConsumer(topic: String, configOverrides: Properties = new 
Properties()): Unit = {
+      val newConsumerConfig = 
defaultConsumerConfig.clone().asInstanceOf[Properties]
+      newConsumerConfig.putAll(configOverrides)
+
+      val consumer = createConsumer(configOverrides = newConsumerConfig)
+      val consumerThread = createConsumerThread(consumer, topic)
+      consumerSet.add(consumer)
+      consumerThreads.add(consumerThread)
+    }
+
+    def start(): Unit = {
+      startLatch = new CountDownLatch(consumerSet.size)
+      stopLatch = new CountDownLatch(consumerSet.size)
+      consumerThreadRunning = new AtomicBoolean(true)
+      consumerThreads.foreach(_.start())
+      assertTrue(startLatch.await(30000, TimeUnit.MILLISECONDS), "Failed to 
start consumer threads in time")
+    }
+
+    def stop(): Unit = {
+      consumerSet.foreach(_.wakeup())
+      consumerThreadRunning.set(false)
+      assertTrue(stopLatch.await(30000, TimeUnit.MILLISECONDS), "Failed to 
stop consumer threads in time")
+    }
+
+    def close(): Unit = {
+      // stop the consumers and wait for consumer threads stopped
+      stop()
+      consumerThreads.foreach(_.join())
+    }
+
+    private def createConsumerThread[K,V](consumer: Consumer[K,V], topic: 
String): Thread = {
+      new Thread {
+        override def run : Unit = {
+          consumer.subscribe(Collections.singleton(topic))
+          try {
+            while (consumerThreadRunning.get()) {
+              consumer.poll(JDuration.ofSeconds(5))
+              if (!consumer.assignment.isEmpty && startLatch.getCount > 0L)
+                startLatch.countDown()
+              try {
+                consumer.commitSync()
+              } catch {
+                case _: CommitFailedException => // Ignore and retry on next 
iteration.
+              }
+            }
+          } catch {
+            case _: WakeupException => // ignore

Review Comment:
   Hi @chia7712, I prefer to use `consumerThreadRunning` to explicitly control 
the `consumerThread`'s lifecycle.
   
   The `WakeupException` is controlled by a `wakeup` variable in 
`ConsumerNetworkClient` 
([Link](https://github.com/apache/kafka/blob/8b22f100831d384910bf770a41ba80724e21cd02/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java#L528-L530)),
 and it can be concurrently mutated by the heartbeat thread. So we can't ensure 
that the `wakeup` call will always trigger a `WakeupException`. As far as I know, this 
is the root cause of this flaky test:
   
   - https://issues.apache.org/jira/browse/KAFKA-18310
   
   For the reasons above, I prefer to keep the `consumerThreadRunning` flag.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to