tomasbartalos commented on a change in pull request #23749: [SPARK-26841][SQL] Kafka timestamp pushdown
URL: https://github.com/apache/spark/pull/23749#discussion_r266466914
##########
File path: external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala
##########
@@ -106,19 +112,49 @@ private[kafka010] class KafkaRelation(
     val rdd = new KafkaSourceRDD(
       sqlContext.sparkContext, executorKafkaParams, offsetRanges,
       pollTimeoutMs, failOnDataLoss, reuseKafkaConsumer = false).map { cr =>
-      InternalRow(
-        cr.key,
-        cr.value,
-        UTF8String.fromString(cr.topic),
-        cr.partition,
-        cr.offset,
-        DateTimeUtils.fromJavaTimestamp(new java.sql.Timestamp(cr.timestamp)),
-        cr.timestampType.id)
+      val columns = requiredColumns.map{KafkaRelation.columnToValueExtractor(_)(cr)}
+      InternalRow.fromSeq(columns)
+    }
+    val schemaProjected = StructType(requiredColumns.map{schema(_)})
+    sqlContext.internalCreateDataFrame(rdd.setName("kafka"), schemaProjected).rdd
+  }
+
+  def invalidateEmptyOffsets(
+      startOffset: Map[TopicPartition, Long],
+      endOffset: Map[TopicPartition, Long]):
+    (Map[TopicPartition, Long], Map[TopicPartition, Long]) = {
+
+    val merged = startOffset.map { case (k, v) => k -> ((v, endOffset(k)))}
+    val invalidated = merged.map {
+      case(k, (start, end)) if start != EMPTY_OFFSET && end != EMPTY_OFFSET =>
+        k -> ((start, end))
+      case(k, _) => k -> ((0L, 0L))
     }
-    sqlContext.internalCreateDataFrame(rdd.setName("kafka"), schema).rdd
+    (invalidated.map{case(k, (start, _)) =>
+      k -> start}, invalidated.map{case(k, (_, end)) => k -> end})
+  }
+
+  private def areOffsetsInLine(fromOffset: Long, untilOffset: Long): Boolean = {
+    untilOffset > fromOffset || untilOffset < 0 || fromOffset < 0
+  }
+
+  private def getEndingPartitionOffsets(
+      kafkaReader: KafkaOffsetReader,
+      filters: Array[Filter]): Map[TopicPartition, Long] = {
+
+    val offsetsByLimit = getPartitionOffsetsByRangeLimit(kafkaReader, endingOffsets)
+    getEndingPartitionOffsetsByFilter(kafkaReader, offsetsByLimit, filters)
##########
Review comment:
Probably the method name `getPartitionOffsetsByRangeLimit` is confusing.
We can push down only offsets to Kafka, so what we need to do is merge the
offsets specified as DS options (`startingOffsets`, `endingOffsets`) with the
offsets obtained from the timestamp filter.
**Example:** the DS options specify an offset range of 100 - 200
`startingOffsets '{"topic" : {"0" : 100}}', endingOffsets '{"topic" : {"0" : 200}}'`
but the timestamp pushdown `where timestamp > x and timestamp < y` yields an
offset range of, let's say, 150 - 300.
The merged result is the highest of the starting offsets to the lowest of the
ending offsets = **150 - 200** (see the sketch below).
I think `getEndingPartitionOffsetsByFilter` does what it says, but it's worth
renaming `getPartitionOffsetsByRangeLimit`. What do you think is a good name?
Maybe `getPartitionOffsetsFromDSOption`?
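To make the merge concrete, here is a minimal sketch of the intended logic, not code from the PR: `mergeOffsetRanges` and its parameter names are hypothetical, and each partition's range is modeled as a `(start, end)` tuple.

```scala
import org.apache.kafka.common.TopicPartition

// Hypothetical helper: per partition, keep the highest of the two starting
// offsets and the lowest of the two ending offsets.
def mergeOffsetRanges(
    optionRange: Map[TopicPartition, (Long, Long)],  // from the startingOffsets/endingOffsets DS options
    filterRange: Map[TopicPartition, (Long, Long)])  // resolved from the timestamp filter
  : Map[TopicPartition, (Long, Long)] = {
  optionRange.map { case (tp, (optStart, optEnd)) =>
    // Partitions without a filter-derived range keep the DS option range.
    val (fltStart, fltEnd) = filterRange.getOrElse(tp, (optStart, optEnd))
    tp -> ((optStart max fltStart, optEnd min fltEnd))
  }
}

// The example above: DS options give 100 - 200, the timestamp filter gives
// 150 - 300, so the merged range is 150 - 200.
val tp = new TopicPartition("topic", 0)
assert(mergeOffsetRanges(
  Map(tp -> ((100L, 200L))),
  Map(tp -> ((150L, 300L)))) == Map(tp -> ((150L, 200L))))
```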