cloud-fan commented on code in PR #36295:
URL: https://github.com/apache/spark/pull/36295#discussion_r903815058
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala:
##########
@@ -407,7 +407,66 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan]
with PredicateHelper wit
case other => (other, false)
}
- def pushDownLimits(plan: LogicalPlan): LogicalPlan = plan.transform {
+ private def pushDownOffset(
+ plan: LogicalPlan,
+ offset: Int): Boolean = plan match {
+ case sHolder: ScanBuilderHolder =>
+ val isPushed = PushDownUtils.pushOffset(sHolder.builder, offset)
+ if (isPushed) {
+ sHolder.pushedOffset = Some(offset)
+ }
+ isPushed
+ case p: Project =>
+ pushDownOffset(p.child, offset)
+ case _ => false
+ }
+
+ def pushDownLimitAndOffset(plan: LogicalPlan): LogicalPlan = plan.transform {
+ case offset @ LimitAndOffset(limit, offsetValue, child) =>
+ val (newChild, canRemoveLimit) = pushDownLimit(child, limit)
+ if (canRemoveLimit) {
+        // If we can remove the limit, it indicates the data source has only one partition.
+        // For `dataset.limit(m).offset(n)`, try to push down `limit(m).offset(n)`.
+        // For example, for `dataset.limit(5).offset(3)`, we can push down `limit(5).offset(3)`.
+ val isPushed = pushDownOffset(newChild, offsetValue)
+ if (isPushed) {
+ newChild
+ } else {
+          // For `dataset.limit(m).offset(n)`, only push down `limit(m)`.
+          // Keep the OFFSET operator if we failed to push down OFFSET to the data source.
+          offset.withNewChildren(Seq(newChild))
+        }
+ } else {
+ // Keep the OFFSET operator if we can't remove LIMIT operator.
+ offset
+ }
+ case globalLimit @ OffsetAndLimit(offset, limit, child) =>
+ val (newChild, canRemoveLimit) = pushDownLimit(child, limit + offset)
+ if (canRemoveLimit) {
+        // If we can remove the limit, it indicates the data source has only one partition.
+        // For `dataset.offset(n).limit(m)`, try to push down `limit(m + n).offset(n)`.
+        // For example, for `dataset.offset(3).limit(5)`, we can push down `limit(8).offset(3)`.
+ val isPushed = pushDownOffset(newChild, offset)
+ if (isPushed) {
+ newChild
+ } else {
+          // For `dataset.offset(n).limit(m)`, try to push down `limit(m + n)`.
+          // Spark still does `offset(n)`.
+          // For example, for `dataset.offset(3).limit(5)`, we can push down `limit(8)`.
+          // Spark still does `offset(3)`.
+          Offset(Literal(offset), newChild)
+        }
+ } else {
+        // For `dataset.offset(n).limit(m)`, try to push down `offset(n)`.
+        // Spark still does `limit(m)`.
+ val isPushed = pushDownOffset(child, offset)
+ if (isPushed) {
+ globalLimit.withNewChildren(Seq(child))
Review Comment:
Is this correct? It seems we may lose the local limit operator here.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]