gaborgsomogyi commented on a change in pull request #23747: [SPARK-26848][SQL] 
Introduce new option to Kafka source: offset by timestamp (starting/ending)
URL: https://github.com/apache/spark/pull/23747#discussion_r266382720
 
 

 ##########
 File path: 
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReader.scala
 ##########
 @@ -135,23 +135,79 @@ private[kafka010] class KafkaOffsetReader(
   def fetchSpecificOffsets(
       partitionOffsets: Map[TopicPartition, Long],
       reportDataLoss: String => Unit): KafkaSourceOffset = {
-    val fetched = runUninterruptibly {
-      withRetriesWithoutInterrupt {
-        // Poll to get the latest assigned partitions
-        consumer.poll(0)
-        val partitions = consumer.assignment()
+    val fnAssertParametersWithPartitions: ju.Set[TopicPartition] => Unit = { 
partitions =>
+      assert(partitions.asScala == partitionOffsets.keySet,
+        "If startingOffsets contains specific offsets, you must specify all 
TopicPartitions.\n" +
+          "Use -1 for latest, -2 for earliest, if you don't care.\n" +
+          s"Specified: ${partitionOffsets.keySet} Assigned: 
${partitions.asScala}")
+      logDebug(s"Partitions assigned to consumer: $partitions. Seeking to 
$partitionOffsets")
+    }
 
-        // Call `position` to wait until the potential offset request 
triggered by `poll(0)` is
-        // done. This is a workaround for KAFKA-7703, which an async 
`seekToBeginning` triggered by
-        // `poll(0)` may reset offsets that should have been set by another 
request.
-        partitions.asScala.map(p => p -> consumer.position(p)).foreach(_ => {})
+    val fnRetrievePartitionOffsets: ju.Set[TopicPartition] => 
Map[TopicPartition, Long] = { _ =>
+      partitionOffsets
+    }
+
+    val fnAssertFetchedOffsets: Map[TopicPartition, Long] => Unit = { fetched 
=>
+      partitionOffsets.foreach {
+        case (tp, off) if off != KafkaOffsetRangeLimit.LATEST &&
+          off != KafkaOffsetRangeLimit.EARLIEST =>
+          if (fetched(tp) != off) {
+            reportDataLoss(
+              s"startingOffsets for $tp was $off but consumer reset to 
${fetched(tp)}")
+          }
+        case _ =>
+        // no real way to check that beginning or end is reasonable
+      }
+    }
+
+    fetchSpecificOffsets0(fnAssertParametersWithPartitions, 
fnRetrievePartitionOffsets,
+      fnAssertFetchedOffsets)
+  }
+
+  def fetchSpecificTimestampBasedOffsets(topicTimestamps: Map[String, Long]): 
KafkaSourceOffset = {
+    val fnAssertParametersWithPartitions: ju.Set[TopicPartition] => Unit = { 
partitions =>
+      val assignedTopics = partitions.asScala.map(_.topic())
+      assert(assignedTopics == topicTimestamps.keySet,
+        "If starting/endingOffsetsByTimestamp contains specific offsets, you 
must specify all " +
+          s"topics. Specified: ${topicTimestamps.keySet} Assigned: 
$assignedTopics")
+      logDebug(s"Partitions assigned to consumer: $partitions. Seeking to 
$topicTimestamps")
+    }
+
+    val fnRetrievePartitionOffsets: ju.Set[TopicPartition] => 
Map[TopicPartition, Long] = {
+      partitions => {
+        val partitionTimestamps: ju.Map[TopicPartition, java.lang.Long] =
+          partitions.asScala.map { topicAndPartition =>
+            topicAndPartition -> 
java.lang.Long.valueOf(topicTimestamps(topicAndPartition.topic()))
+          }.toMap.asJava
+
+        val offsetForTime: ju.Map[TopicPartition, OffsetAndTimestamp] =
+          consumer.offsetsForTimes(partitionTimestamps)
 
 Review comment:
   2 concerns:
   * If the message format version in a partition is before 0.10.0, i.e. the 
messages do not have timestamps, null will be returned for that partition. You 
mentioned it's improbable that the broker is older than 0.10.0, which I tend to 
agree with; however, the broker may be new while the handled Kafka log is still in the old format.
   * This suffers from a classic Kafka issue: this method may block 
indefinitely if the partition does not exist.
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to