Github user gaborgsomogyi commented on a diff in the pull request:
https://github.com/apache/spark/pull/22138#discussion_r214716234
--- Diff:
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/FetchedDataPool.scala
---
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.kafka010
+
+import java.{util => ju}
+import java.util.concurrent.TimeUnit
+
+import scala.collection.mutable
+
+import org.apache.kafka.clients.consumer.ConsumerRecord
+
+import org.apache.spark.SparkEnv
+import org.apache.spark.sql.kafka010.KafkaDataConsumer.{CacheKey, UNKNOWN_OFFSET}
+import org.apache.spark.util.ThreadUtils
+
+/**
+ * Provides an object pool of [[FetchedData]] instances, grouped by [[CacheKey]].
+ *
+ * Along with the CacheKey, callers pass the desired start offset, which is used to find a
+ * cached FetchedData that may have been stored by a previous batch. If no matching entry
+ * is found, a new FetchedData is created.
+ */
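+// A minimal usage sketch (illustrative only; the `acquire`/`release` methods are
+// assumed to be defined further down in this file, outside the portion quoted here):
+//
+//   val data = pool.acquire(key, desiredStartOffset)  // reuse a cached entry or create one
+//   // ... consume records through `data` ...
+//   pool.release(key, data)                           // hand the entry back for reuse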
+private[kafka010] class FetchedDataPool {
+  import FetchedDataPool._
+
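+  /**
+   * Pairs a pooled [[FetchedData]] with bookkeeping metadata: the timestamps of the
+   * last acquire/release (consulted by the idle-entry evictor) and an in-use flag.
+   */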
+  private[kafka010] case class CachedFetchedData(fetchedData: FetchedData) {
+    var lastReleasedTimestamp: Long = Long.MaxValue
+    var lastAcquiredTimestamp: Long = Long.MinValue
+    var inUse: Boolean = false
+
+    def getObject: FetchedData = fetchedData
+  }
+
+  private object CachedFetchedData {
+    def empty(): CachedFetchedData = {
+      val emptyData = FetchedData(
+        ju.Collections.emptyListIterator[ConsumerRecord[Array[Byte], Array[Byte]]],
+        UNKNOWN_OFFSET,
+        UNKNOWN_OFFSET)
+
+      CachedFetchedData(emptyData)
+    }
+  }
+
+  private type CachedFetchedDataList = mutable.ListBuffer[CachedFetchedData]
+
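+  // Pool storage: each CacheKey maps to a list of cached entries, since more than one
+  // FetchedData may exist for the same key at a time.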
+  private val cache: mutable.Map[CacheKey, CachedFetchedDataList] = mutable.HashMap.empty
+
+  /** Retrieve internal cache. This method is only for testing. */
+  private[kafka010] def getCache: mutable.Map[CacheKey, CachedFetchedDataList] = cache
+
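+  // Eviction tuning read from SparkConf: how long an idle entry may stay cached, and
+  // how often the evictor thread wakes up.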
+  private val (minEvictableIdleTimeMillis, evictorThreadRunIntervalMillis): (Long, Long) = {
+    val conf = SparkEnv.get.conf
+
+    val minEvictIdleTime = conf.getLong(CONFIG_NAME_MIN_EVICTABLE_IDLE_TIME_MILLIS,
+      DEFAULT_VALUE_MIN_EVICTABLE_IDLE_TIME_MILLIS)
+
+    val evictorThreadInterval = conf.getLong(
+      CONFIG_NAME_EVICTOR_THREAD_RUN_INTERVAL_MILLIS,
+      DEFAULT_VALUE_EVICTOR_THREAD_RUN_INTERVAL_MILLIS)
+
+    (minEvictIdleTime, evictorThreadInterval)
+  }
+
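+  // Single daemon thread that periodically evicts idle entries from the pool.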
+  private val executorService = ThreadUtils.newDaemonSingleThreadScheduledExecutor(
+    "kafka-fetched-data--cache-evictor")
--- End diff --
Nit: double dash in the thread name; should be `kafka-fetched-data-cache-evictor`.
---