cloud-fan commented on code in PR #36295:
URL: https://github.com/apache/spark/pull/36295#discussion_r902319260
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala:
##########
@@ -407,8 +407,69 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan]
with PredicateHelper wit
case other => (other, false)
}
- def pushDownLimits(plan: LogicalPlan): LogicalPlan = plan.transform {
+ private def pushDownOffset(
+ plan: LogicalPlan,
+ offset: Int): Boolean = plan match {
+ case sHolder: ScanBuilderHolder =>
+ val isPushed = PushDownUtils.pushOffset(sHolder.builder, offset)
+ if (isPushed) {
+ sHolder.pushedOffset = Some(offset)
+ }
+ isPushed
+ case p: Project =>
+ pushDownOffset(p.child, offset)
+ case _ => false
+ }
+
+ def pushDownLimitAndOffset(plan: LogicalPlan): LogicalPlan = plan.transform {
+ case offset @ LimitAndOffset(limit, offsetValue, child) =>
+ val (newChild, canRemoveLimit) = pushDownLimit(child, limit)
+ if (canRemoveLimit) {
+        // If we can remove limit, it indicates the data source only has one partition.
+        // For `dataset.limit(m).offset(n)`, try to push down `offset(n).limit(m - n)`.
+        // For example, for `dataset.limit(5).offset(3)`, we can push down `offset(3).limit(2)`.
Review Comment:
The comment doesn't match the code. The cases it describes are handled by
`LimitAndOffset`, not here. How about
```
// Only push down OFFSET if LIMIT has been fully pushed.
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]