gaborgsomogyi commented on a change in pull request #25853: [SPARK-21869][SS] Apply Apache Commons Pool to Kafka producer
URL: https://github.com/apache/spark/pull/25853#discussion_r343659955
 
 

 ##########
 File path: external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/InternalKafkaConsumerPool.scala
 ##########
 @@ -18,204 +18,46 @@
 package org.apache.spark.sql.kafka010
 
 import java.{util => ju}
-import java.util.concurrent.ConcurrentHashMap
 
-import org.apache.commons.pool2.{BaseKeyedPooledObjectFactory, PooledObject, SwallowedExceptionListener}
-import org.apache.commons.pool2.impl.{DefaultEvictionPolicy, DefaultPooledObject, GenericKeyedObjectPool, GenericKeyedObjectPoolConfig}
+import org.apache.commons.pool2.PooledObject
 
 import org.apache.spark.SparkConf
-import org.apache.spark.internal.Logging
-import org.apache.spark.sql.kafka010.InternalKafkaConsumerPool._
 import org.apache.spark.sql.kafka010.KafkaDataConsumer.CacheKey
 
-/**
- * Provides object pool for [[InternalKafkaConsumer]] which is grouped by [[CacheKey]].
- *
- * This class leverages [[GenericKeyedObjectPool]] internally, hence providing methods based on
- * the class, and same contract applies: after using the borrowed object, you must either call
- * returnObject() if the object is healthy to return to pool, or invalidateObject() if the object
- * should be destroyed.
- *
- * The soft capacity of pool is determined by "spark.kafka.consumer.cache.capacity" config value,
- * and the pool will have reasonable default value if the value is not provided.
- * (The instance will do its best effort to respect soft capacity but it can exceed when there's
- * a borrowing request and there's neither free space nor idle object to clear.)
- *
- * This class guarantees that no caller will get pooled object once the object is borrowed and
- * not yet returned, hence provide thread-safety usage of non-thread-safe [[InternalKafkaConsumer]]
- * unless caller shares the object to multiple threads.
- */
+// TODO: revisit the relation between CacheKey and kafkaParams - for now it looks a bit weird
+//   as we force all consumers having same (groupId, topicPartition) to have same kafkaParams
+//   which might be viable in performance perspective (kafkaParams might be too huge to use
+//   as a part of key), but there might be the case kafkaParams could be different -
+//   cache key should be differentiated for both kafkaParams.
 private[kafka010] class InternalKafkaConsumerPool(
-    objectFactory: ObjectFactory,
-    poolConfig: PoolConfig) extends Logging {
+    objectFactory: ConsumerObjectFactory,
+    poolConfig: ConsumerPoolConfig)
+  extends InternalKafkaConnectorPool[CacheKey, InternalKafkaConsumer](
+      objectFactory,
+      poolConfig,
+      new CustomSwallowedExceptionListener("consumer")) {
 
   def this(conf: SparkConf) = {
-    this(new ObjectFactory, new PoolConfig(conf))
-  }
-
-  // the class is intended to have only soft capacity
-  assert(poolConfig.getMaxTotal < 0)
-
-  private val pool = {
-    val internalPool = new GenericKeyedObjectPool[CacheKey, InternalKafkaConsumer](
-      objectFactory, poolConfig)
-    internalPool.setSwallowedExceptionListener(CustomSwallowedExceptionListener)
-    internalPool
-  }
-
-  /**
-   * Borrows [[InternalKafkaConsumer]] object from the pool. If there's no idle object for the key,
-   * the pool will create the [[InternalKafkaConsumer]] object.
-   *
-   * If the pool doesn't have idle object for the key and also exceeds the soft capacity,
-   * pool will try to clear some of idle objects.
-   *
-   * Borrowed object must be returned by either calling returnObject or invalidateObject, otherwise
-   * the object will be kept in pool as active object.
-   */
-  def borrowObject(key: CacheKey, kafkaParams: ju.Map[String, Object]): InternalKafkaConsumer = {
-    updateKafkaParamForKey(key, kafkaParams)
-
-    if (size >= poolConfig.softMaxSize) {
-      logWarning("Pool exceeds its soft max size, cleaning up idle objects...")
-      pool.clearOldest()
-    }
-
-    pool.borrowObject(key)
-  }
-
-  /** Returns borrowed object to the pool. */
-  def returnObject(consumer: InternalKafkaConsumer): Unit = {
-    pool.returnObject(extractCacheKey(consumer), consumer)
-  }
-
-  /** Invalidates (destroy) borrowed object to the pool. */
-  def invalidateObject(consumer: InternalKafkaConsumer): Unit = {
-    pool.invalidateObject(extractCacheKey(consumer), consumer)
-  }
-
-  /** Invalidates all idle consumers for the key */
-  def invalidateKey(key: CacheKey): Unit = {
-    pool.clear(key)
-  }
-
-  /**
-   * Closes the keyed object pool. Once the pool is closed,
-   * borrowObject will fail with [[IllegalStateException]], but returnObject and invalidateObject
-   * will continue to work, with returned objects destroyed on return.
-   *
-   * Also destroys idle instances in the pool.
-   */
-  def close(): Unit = {
-    pool.close()
-  }
-
-  def reset(): Unit = {
-    // this is the best-effort of clearing up. otherwise we should close the pool and create again
-    // but we don't want to make it "var" only because of tests.
-    pool.clear()
+    this(new ConsumerObjectFactory, new ConsumerPoolConfig(conf))
   }
 
-  def numIdle: Int = pool.getNumIdle
-
-  def numIdle(key: CacheKey): Int = pool.getNumIdle(key)
-
-  def numActive: Int = pool.getNumActive
-
-  def numActive(key: CacheKey): Int = pool.getNumActive(key)
-
-  def size: Int = numIdle + numActive
-
-  def size(key: CacheKey): Int = numIdle(key) + numActive(key)
-
-  // TODO: revisit the relation between CacheKey and kafkaParams - for now it looks a bit weird
-  //   as we force all consumers having same (groupId, topicPartition) to have same kafkaParams
-  //   which might be viable in performance perspective (kafkaParams might be too huge to use
-  //   as a part of key), but there might be the case kafkaParams could be different -
-  //   cache key should be differentiated for both kafkaParams.
-  private def updateKafkaParamForKey(key: CacheKey, kafkaParams: ju.Map[String, Object]): Unit = {
-    // We can assume that kafkaParam should not be different for same cache key,
-    // otherwise we can't reuse the cached object and cache key should contain kafkaParam.
-    // So it should be safe to put the key/value pair only when the key doesn't exist.
-    val oldKafkaParams = objectFactory.keyToKafkaParams.putIfAbsent(key, kafkaParams)
-    require(oldKafkaParams == null || kafkaParams == oldKafkaParams, "Kafka parameters for same " +
-      s"cache key should be equal. old parameters: $oldKafkaParams new parameters: $kafkaParams")
-  }
-
-  private def extractCacheKey(consumer: InternalKafkaConsumer): CacheKey = {
+  protected def createKey(consumer: InternalKafkaConsumer): CacheKey = {
 
 Review comment:
   Fixed.
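
  For readers outside the diff context, here is a minimal standalone sketch of the borrow/return contract that the removed scaladoc describes (and that the new `InternalKafkaConnectorPool` base class presumably preserves). `FakeConsumer`, its factory, and the plain `String` key are illustrative placeholders, not the PR's actual types:

  ```scala
  import org.apache.commons.pool2.{BaseKeyedPooledObjectFactory, PooledObject}
  import org.apache.commons.pool2.impl.{DefaultPooledObject, GenericKeyedObjectPool}

  // Placeholder resource standing in for InternalKafkaConsumer.
  class FakeConsumer(val key: String) {
    def close(): Unit = ()
  }

  // Factory the pool uses to create, wrap and destroy pooled instances.
  class FakeConsumerFactory extends BaseKeyedPooledObjectFactory[String, FakeConsumer] {
    override def create(key: String): FakeConsumer = new FakeConsumer(key)
    override def wrap(value: FakeConsumer): PooledObject[FakeConsumer] =
      new DefaultPooledObject(value)
    override def destroyObject(key: String, p: PooledObject[FakeConsumer]): Unit =
      p.getObject.close()
  }

  object PoolContractExample {
    def main(args: Array[String]): Unit = {
      val pool = new GenericKeyedObjectPool[String, FakeConsumer](new FakeConsumerFactory)
      val consumer = pool.borrowObject("group-1-topic-0") // created on first borrow
      try {
        // use the consumer; the pool hands it to no other caller until it is returned
      } finally {
        pool.returnObject("group-1-topic-0", consumer) // healthy object goes back to the idle set
        // on failure, call pool.invalidateObject("group-1-topic-0", consumer) instead
      }
      pool.close() // destroys idle instances; later borrowObject calls throw IllegalStateException
    }
  }
  ```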

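  Similarly, a sketch of the `putIfAbsent`-based guard that the removed `updateKafkaParamForKey` performed, isolated from the pool so the first-params-wins behavior is visible on its own (again with a plain `String` standing in for `CacheKey`):

  ```scala
  import java.{util => ju}
  import java.util.concurrent.ConcurrentHashMap

  object ParamsGuardExample {
    // The first kafkaParams registered for a key wins; later callers must pass equal params.
    private val keyToKafkaParams = new ConcurrentHashMap[String, ju.Map[String, Object]]()

    def updateKafkaParamForKey(key: String, kafkaParams: ju.Map[String, Object]): Unit = {
      val oldKafkaParams = keyToKafkaParams.putIfAbsent(key, kafkaParams)
      require(oldKafkaParams == null || kafkaParams == oldKafkaParams, "Kafka parameters for same " +
        s"cache key should be equal. old parameters: $oldKafkaParams new parameters: $kafkaParams")
    }

    def main(args: Array[String]): Unit = {
      val params = ju.Collections.singletonMap[String, Object]("bootstrap.servers", "host:9092")
      updateKafkaParamForKey("group-1-topic-0", params) // registers params for the key
      updateKafkaParamForKey("group-1-topic-0", params) // equal params: passes
      val other = ju.Collections.singletonMap[String, Object]("bootstrap.servers", "other:9092")
      // updateKafkaParamForKey("group-1-topic-0", other) // would throw IllegalArgumentException
    }
  }
  ```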