gaborgsomogyi commented on a change in pull request #19096: [SPARK-21869][SS] A cached Kafka producer should not be closed if any task is using it - adds in-use tracking.
URL: https://github.com/apache/spark/pull/19096#discussion_r264183722
 
 

 ##########
 File path: external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/CachedKafkaProducerSuite.scala
 ##########
 @@ -35,43 +36,72 @@ class CachedKafkaProducerSuite extends SharedSQLContext with PrivateMethodTester
     CachedKafkaProducer.clear()
   }
 
-  test("Should return the cached instance on calling getOrCreate with same 
params.") {
-    val kafkaParams = new ju.HashMap[String, Object]()
-    kafkaParams.put("acks", "0")
-    // Here only the host should be resolvable; it does not need a running instance of a kafka server.
-    kafkaParams.put("bootstrap.servers", "127.0.0.1:9022")
-    kafkaParams.put("key.serializer", classOf[ByteArraySerializer].getName)
-    kafkaParams.put("value.serializer", classOf[ByteArraySerializer].getName)
-    val producer = CachedKafkaProducer.getOrCreate(kafkaParams)
-    val producer2 = CachedKafkaProducer.getOrCreate(kafkaParams)
-    assert(producer == producer2)
-
-    val cacheMap = PrivateMethod[ConcurrentMap[Seq[(String, Object)], KP]]('getAsMap)
-    val map = CachedKafkaProducer.invokePrivate(cacheMap())
+  test("Should return the cached instance on calling acquire with same 
params.") {
+    val kafkaParams: ju.HashMap[String, Object] = generateKafkaParams
+    val producer = CachedKafkaProducer.acquire(kafkaParams)
+    val producer2 = CachedKafkaProducer.acquire(kafkaParams)
+    assert(producer.kafkaProducer == producer2.kafkaProducer)
+    assert(producer.getInUseCount == 2)
+    val map = CachedKafkaProducer.getAsMap
     assert(map.size == 1)
   }
 
-  test("Should close the correct kafka producer for the given kafkaPrams.") {
+  test("Should return the new instance on calling acquire with different 
params.") {
+    val kafkaParams: ju.HashMap[String, Object] = generateKafkaParams
+    val producer = CachedKafkaProducer.acquire(kafkaParams)
+    kafkaParams.remove("ack") // mutate the kafka params.
+    val producer2 = CachedKafkaProducer.acquire(kafkaParams)
+    assert(producer.kafkaProducer != producer2.kafkaProducer)
+    assert(producer.getInUseCount == 1)
+    assert(producer2.getInUseCount == 1)
+    val map = CachedKafkaProducer.getAsMap
+    assert(map.size == 2)
+  }
+
+  test("Should return the cached instance, even if auth tokens are set up.") {
+    // TODO.
 
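For context, the acquire/getInUseCount behavior asserted in these tests can be pictured as a reference-counted wrapper around the raw producer. The sketch below is illustrative only: the names (`SketchedCachedProducer`, `acquired`, `released`) and the key shape are assumptions, not the PR's actual implementation.

```scala
import java.{util => ju}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.AtomicInteger

import scala.collection.JavaConverters._

import org.apache.kafka.clients.producer.KafkaProducer

// Illustrative wrapper pairing a raw KafkaProducer with an in-use counter,
// mirroring the producer.kafkaProducer / producer.getInUseCount calls above.
class SketchedCachedProducer(val kafkaProducer: KafkaProducer[Array[Byte], Array[Byte]]) {
  private val inUse = new AtomicInteger(0)
  def getInUseCount: Int = inUse.get()
  def acquired(): Unit = inUse.incrementAndGet()
  def released(): Unit = inUse.decrementAndGet()
}

object SketchedProducerCache {
  // Keyed by the sorted kafka params, matching the map shape in the old test.
  private val cache: ConcurrentMap[Seq[(String, Object)], SketchedCachedProducer] =
    new ConcurrentHashMap[Seq[(String, Object)], SketchedCachedProducer]()

  private def toKey(params: ju.HashMap[String, Object]): Seq[(String, Object)] =
    params.asScala.toSeq.sortBy(_._1)

  // acquire bumps the in-use count, so acquiring twice with the same params
  // yields one underlying producer with getInUseCount == 2, as asserted above.
  def acquire(params: ju.HashMap[String, Object]): SketchedCachedProducer = {
    val cached = cache.computeIfAbsent(toKey(params), _ =>
      new SketchedCachedProducer(new KafkaProducer[Array[Byte], Array[Byte]](params)))
    cached.acquired()
    cached
  }

  def release(cached: SketchedCachedProducer): Unit = {
    cached.released()
    // A real implementation would close the underlying producer only once the
    // count reaches zero and the entry has been evicted from the cache.
  }
}
```

Counting acquisitions instead of closing eagerly is what lets an evicted producer stay open while a task is still writing through it, which is the scenario SPARK-21869 describes.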
 Review comment:
   A query may fail because of a corrupted delegation token or any other problem; I would like to handle that in SPARK-27042. Here I would focus on the following:
   * If a new producer is created, the latest delegation token has to be picked up.
   * The delegation token shouldn't be part of the cache key (see the sketch below).
   
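To make the second bullet concrete, here is a sketch of deriving the cache key with token-bearing params filtered out. The param name `sasl.jaas.config` is only an assumption about where a delegation token would surface; the real key derivation lives in CachedKafkaProducer.

```scala
import java.{util => ju}

import scala.collection.JavaConverters._

object SketchedKeyDerivation {
  // Assumed name of the param carrying the (rotating) delegation token;
  // the actual config entry may differ.
  private val excludedFromKey = Set("sasl.jaas.config")

  // Drop token-bearing entries before building the cache key, so a producer
  // recreated with a refreshed token still maps to the same cache slot.
  def toCacheKey(params: ju.HashMap[String, Object]): Seq[(String, Object)] =
    params.asScala.toSeq
      .filterNot { case (name, _) => excludedFromKey.contains(name) }
      .sortBy(_._1)
}
```

With a key like this, a producer recreated after token renewal lands in the same cache slot, while the fresh token is still read from the full params at construction time, which covers the first bullet as well.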
