jose-torres commented on a change in pull request #23749: [SPARK-26841][SQL] Kafka timestamp pushdown
URL: https://github.com/apache/spark/pull/23749#discussion_r266095945
 
 

 ##########
 File path: external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala
 ##########
 @@ -106,19 +112,49 @@ private[kafka010] class KafkaRelation(
     val rdd = new KafkaSourceRDD(
       sqlContext.sparkContext, executorKafkaParams, offsetRanges,
       pollTimeoutMs, failOnDataLoss, reuseKafkaConsumer = false).map { cr =>
-      InternalRow(
-        cr.key,
-        cr.value,
-        UTF8String.fromString(cr.topic),
-        cr.partition,
-        cr.offset,
-        DateTimeUtils.fromJavaTimestamp(new java.sql.Timestamp(cr.timestamp)),
-        cr.timestampType.id)
+        val columns = requiredColumns.map { KafkaRelation.columnToValueExtractor(_)(cr) }
+        InternalRow.fromSeq(columns)
+      }
+    val schemaProjected = StructType(requiredColumns.map { schema(_) })
+    sqlContext.internalCreateDataFrame(rdd.setName("kafka"), schemaProjected).rdd
+  }
+
+  def invalidateEmptyOffsets(
+      startOffset: Map[TopicPartition, Long],
+      endOffset: Map[TopicPartition, Long])
+    : (Map[TopicPartition, Long], Map[TopicPartition, Long]) = {
+
+    val merged = startOffset.map { case (k, v) => k -> ((v, endOffset(k))) }
+    val invalidated = merged.map {
+      case (k, (start, end)) if start != EMPTY_OFFSET && end != EMPTY_OFFSET =>
+        k -> ((start, end))
+      case (k, _) => k -> ((0L, 0L))
     }
-    sqlContext.internalCreateDataFrame(rdd.setName("kafka"), schema).rdd
+    (invalidated.map { case (k, (start, _)) => k -> start },
+      invalidated.map { case (k, (_, end)) => k -> end })
+  }
+
+  private def areOffsetsInLine(fromOffset: Long, untilOffset: Long): Boolean = {
+    untilOffset > fromOffset || untilOffset < 0 || fromOffset < 0
+  }
+
+  private def getEndingPartitionOffsets(
+      kafkaReader: KafkaOffsetReader,
+      filters: Array[Filter]): Map[TopicPartition, Long] = {
+
+    val offsetsByLimit = getPartitionOffsetsByRangeLimit(kafkaReader, endingOffsets)
+    getEndingPartitionOffsetsByFilter(kafkaReader, offsetsByLimit, filters)
 
 Review comment:
   I'm not sure the naming quite makes sense to me here. It's weird to have to apply multiple levels of "getPartitionOffsets" before obtaining the actual partition offsets that should be used.
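   For illustration only, here is one shape the naming could take: a single entry point whose name states the final result, with the intermediate "getPartitionOffsets..." layers demoted to clearly named private steps. The names resolveEndingOffsets, clampByRangeLimit, and applyTimestampFilters below are hypothetical stand-ins, not identifiers from the PR:

       import org.apache.kafka.common.TopicPartition
       import org.apache.spark.sql.sources.Filter

       // Hypothetical sketch, not code from the PR. clampByRangeLimit stands in
       // for getPartitionOffsetsByRangeLimit, and applyTimestampFilters stands
       // in for getEndingPartitionOffsetsByFilter; only the entry point's name
       // promises the offsets that will actually be used.
       private def resolveEndingOffsets(
           kafkaReader: KafkaOffsetReader,
           filters: Array[Filter]): Map[TopicPartition, Long] = {
         // Step 1: clamp the configured ending offsets by any range limit.
         val clamped = clampByRangeLimit(kafkaReader, endingOffsets)
         // Step 2: tighten the clamped offsets using pushed-down timestamp filters.
         applyTimestampFilters(kafkaReader, clamped, filters)
       }

   With that shape, only resolveEndingOffsets is visible to the read path, so a reader never has to chase two levels of "getPartitionOffsets" to find the offsets that are actually used.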
