HeartSaVioR commented on a change in pull request #22138: [SPARK-25151][SS] Apply Apache Commons Pool to KafkaDataConsumer
URL: https://github.com/apache/spark/pull/22138#discussion_r317781412
 
 

 ##########
 File path: external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/FetchedDataPoolSuite.scala
 ##########
 @@ -0,0 +1,337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.kafka010
+
+import java.{util => ju}
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+import org.apache.kafka.clients.consumer.ConsumerRecord
+import org.apache.kafka.common.TopicPartition
+import org.scalatest.PrivateMethodTester
+
+import org.apache.spark.SparkEnv
+import org.apache.spark.sql.kafka010.KafkaDataConsumer.CacheKey
+import org.apache.spark.sql.test.SharedSparkSession
+
+class FetchedDataPoolSuite extends SharedSparkSession with PrivateMethodTester {
+  import FetchedDataPool._
+  type Record = ConsumerRecord[Array[Byte], Array[Byte]]
+
+  private val dummyBytes = "dummy".getBytes
+
+  // Helper private method accessors for FetchedDataPool
+  private type PoolCacheType = mutable.Map[CacheKey, CachedFetchedDataList]
+  private val _cache = PrivateMethod[PoolCacheType]('cache)
+
+  def getCache(pool: FetchedDataPool): PoolCacheType = {
+    pool.invokePrivate(_cache())
+  }
+
+  test("acquire fresh one") {
+    val dataPool = FetchedDataPool.build
+
+    val cacheKey = CacheKey("testgroup", new TopicPartition("topic", 0))
+
+    assert(getCache(dataPool).get(cacheKey).isEmpty)
+
+    val data = dataPool.acquire(cacheKey, 0)
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 1, expectedNumTotal = 1)
+    assert(getCache(dataPool)(cacheKey).size === 1)
+    assert(getCache(dataPool)(cacheKey).head.inUse)
+
+    data.withNewPoll(testRecords(0, 5).listIterator, 5)
+
+    dataPool.release(cacheKey, data)
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 1, expectedNumTotal = 1)
+    assert(getCache(dataPool)(cacheKey).size === 1)
+    assert(!getCache(dataPool)(cacheKey).head.inUse)
+
+    dataPool.shutdown()
+  }
+
+  test("acquire fetched data from multiple keys") {
+    val dataPool = FetchedDataPool.build
+
+    val cacheKeys = (0 until 10).map { partId =>
+      CacheKey("testgroup", new TopicPartition("topic", partId))
+    }
+
+    assert(getCache(dataPool).size === 0)
+    cacheKeys.foreach { key => assert(getCache(dataPool).get(key).isEmpty) }
+
+    val dataList = cacheKeys.map(key => (key, dataPool.acquire(key, 0)))
+
+    assert(getCache(dataPool).size === cacheKeys.size)
+    cacheKeys.foreach { key =>
+      assert(getCache(dataPool)(key).size === 1)
+      assert(getCache(dataPool)(key).head.inUse)
+    }
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 10, expectedNumTotal = 10)
+
+    dataList.foreach { case (_, data) =>
+      data.withNewPoll(testRecords(0, 5).listIterator, 5)
+    }
+
+    dataList.foreach { case (key, data) =>
+      dataPool.release(key, data)
+    }
+
+    assert(getCache(dataPool).size === cacheKeys.size)
+    cacheKeys.foreach { key =>
+      assert(getCache(dataPool)(key).size === 1)
+      assert(!getCache(dataPool)(key).head.inUse)
+    }
+
+    dataPool.shutdown()
+  }
+
+  test("continuous use of fetched data from single key") {
+    val dataPool = FetchedDataPool.build
+
+    val cacheKey = CacheKey("testgroup", new TopicPartition("topic", 0))
+
+    assert(getCache(dataPool).get(cacheKey).isEmpty)
+
+    val data = dataPool.acquire(cacheKey, 0)
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 1, expectedNumTotal = 1)
+    assert(getCache(dataPool)(cacheKey).size === 1)
+    assert(getCache(dataPool)(cacheKey).head.inUse)
+
+    data.withNewPoll(testRecords(0, 5).listIterator, 5)
+
+    (0 to 3).foreach { _ => data.next() }
+
+    dataPool.release(cacheKey, data)
+
+    // simulate the next batch
+
+    val data2 = dataPool.acquire(cacheKey, data.nextOffsetInFetchedData)
+
+    assert(data.eq(data2))
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 1, expectedNumTotal = 1)
+    assert(getCache(dataPool)(cacheKey).size === 1)
+    assert(getCache(dataPool)(cacheKey).head.inUse)
+
+    dataPool.release(cacheKey, data2)
+
+    assert(getCache(dataPool)(cacheKey).size === 1)
+    assert(!getCache(dataPool)(cacheKey).head.inUse)
+
+    dataPool.shutdown()
+  }
+
+  test("multiple tasks referring same key continuously using fetched data") {
+    val dataPool = FetchedDataPool.build
+
+    val cacheKey = CacheKey("testgroup", new TopicPartition("topic", 0))
+
+    assert(getCache(dataPool).get(cacheKey).isEmpty)
+
+    val dataFromTask1 = dataPool.acquire(cacheKey, 0)
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 1, expectedNumTotal = 1)
+    assert(getCache(dataPool)(cacheKey).size === 1)
+    assert(getCache(dataPool)(cacheKey).head.inUse)
+
+    val dataFromTask2 = dataPool.acquire(cacheKey, 0)
+
+    // It shouldn't give the same object as dataFromTask1, even though it asks for the same offset.
+    // It definitely works when the offsets don't overlap, so we skip adding a test for that.
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 2, expectedNumTotal = 2)
+    assert(getCache(dataPool)(cacheKey).size === 2)
+    assert(getCache(dataPool)(cacheKey)(1).inUse)
+
+    // reading from task 1
+    dataFromTask1.withNewPoll(testRecords(0, 5).listIterator, 5)
+
+    (0 to 3).foreach { _ => dataFromTask1.next() }
+
+    dataPool.release(cacheKey, dataFromTask1)
+
+    // reading from task 2
+    dataFromTask2.withNewPoll(testRecords(0, 30).listIterator, 30)
+
+    (0 to 5).foreach { _ => dataFromTask2.next() }
+
+    dataPool.release(cacheKey, dataFromTask2)
+
+    // simulate the next batch for task 1
+    val data2FromTask1 = dataPool.acquire(cacheKey, dataFromTask1.nextOffsetInFetchedData)
+    assert(data2FromTask1.eq(dataFromTask1))
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 2, expectedNumTotal = 2)
+    assert(getCache(dataPool)(cacheKey).head.inUse)
+
+    // simulate the next batch for task 2
+    val data2FromTask2 = dataPool.acquire(cacheKey, dataFromTask2.nextOffsetInFetchedData)
+    assert(data2FromTask2.eq(dataFromTask2))
+
+    assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 2, expectedNumTotal = 2)
+    assert(getCache(dataPool)(cacheKey)(1).inUse)
+
+    // release from task 2
+    dataPool.release(cacheKey, data2FromTask2)
+    assert(!getCache(dataPool)(cacheKey)(1).inUse)
+
+    // release from task 1
+    dataPool.release(cacheKey, data2FromTask1)
+    assert(!getCache(dataPool)(cacheKey).head.inUse)
+
+    dataPool.shutdown()
+  }
+
+  test("evict idle fetched data") {
+    import FetchedDataPool._
+    import org.scalatest.time.SpanSugar._
+
+    val minEvictableIdleTimeMillis = 1000
+    val evictorThreadRunIntervalMillis = 500
+
+    val newConf = Seq(
+      CONFIG_NAME_MIN_EVICTABLE_IDLE_TIME_MILLIS -> minEvictableIdleTimeMillis.toString,
+      CONFIG_NAME_EVICTOR_THREAD_RUN_INTERVAL_MILLIS -> evictorThreadRunIntervalMillis.toString)
+
+    withSparkConf(newConf: _*) {
+      val dataPool = FetchedDataPool.build
+
+      val cacheKeys = (0 until 10).map { partId =>
+        CacheKey("testgroup", new TopicPartition("topic", partId))
+      }
+
+      val dataList = cacheKeys.map(key => (key, dataPool.acquire(key, 0)))
+
+      assertFetchedDataPoolStatistic(dataPool, expectedNumCreated = 10, expectedNumTotal = 10)
+
+      dataList.foreach { case (_, data) =>
+        data.withNewPoll(testRecords(0, 5).listIterator, 5)
+      }
+
+      val dataToEvict = dataList.take(3)
+      dataToEvict.foreach { case (key, data) =>
 
 Review comment:
   Yeah, I see the point. I considered something similar, but since Commons Pool just relies on a ScheduledThreadPoolExecutor for its evictor thread, which doesn't seem to support a manual clock, I gave up on it.
   
   Given that the pool allows calling evict directly, I'll try to add a test that doesn't rely on the eviction thread (though it will still rely on wall-clock time, since we get the time from System).
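   
   For reference, here's a rough sketch of what a direct-eviction test could look like against a plain commons-pool2 `GenericObjectPool` (the `Dummy` factory below is a hypothetical stand-in, not the actual FetchedDataPool internals):
   
   ```scala
   import org.apache.commons.pool2.{BasePooledObjectFactory, PooledObject}
   import org.apache.commons.pool2.impl.{DefaultPooledObject, GenericObjectPool}
   
   // Hypothetical pooled type standing in for the real pooled objects.
   class Dummy
   class DummyFactory extends BasePooledObjectFactory[Dummy] {
     override def create(): Dummy = new Dummy
     override def wrap(obj: Dummy): PooledObject[Dummy] = new DefaultPooledObject(obj)
   }
   
   val pool = new GenericObjectPool[Dummy](new DummyFactory)
   // A non-positive interval disables the scheduled evictor thread entirely.
   pool.setTimeBetweenEvictionRunsMillis(-1)
   // Idle objects older than 1s become eligible for eviction.
   pool.setMinEvictableIdleTimeMillis(1000)
   
   val obj = pool.borrowObject()
   pool.returnObject(obj)
   assert(pool.getNumIdle == 1)
   
   // Still wall-clock based: wait past the idle threshold, then run a single
   // eviction pass synchronously instead of waiting for the evictor thread.
   Thread.sleep(1500)
   pool.evict()
   assert(pool.getNumIdle == 0)
   
   pool.close()
   ```
   
   That way the only wall-clock dependence is the explicit sleep, not the evictor's scheduling jitter.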
