cloud-fan commented on code in PR #36295:
URL: https://github.com/apache/spark/pull/36295#discussion_r882685206
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala:
##########
@@ -407,8 +407,65 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan]
with PredicateHelper wit
case other => (other, false)
}
- def pushDownLimits(plan: LogicalPlan): LogicalPlan = plan.transform {
+ private def pushDownOffset(
+ plan: LogicalPlan,
+ offset: Int): Boolean = plan match {
+ case sHolder: ScanBuilderHolder =>
+ val isPushed = PushDownUtils.pushOffset(sHolder.builder, offset)
+ if (isPushed) {
+ sHolder.pushedOffset = Some(offset)
+ }
+ isPushed
+ case p: Project =>
+ pushDownOffset(p.child, offset)
+ case _ => false
+ }
+
+ def pushDownLimitAndOffset(plan: LogicalPlan): LogicalPlan = plan.transform {
+ case offset @ LimitAndOffset(limit, offsetValue, child) =>
+ val (newChild, canRemoveLimit) = pushDownLimit(child, limit)
+ if (canRemoveLimit) {
+       // If we can remove the limit, it indicates the data source only has one partition.
+       // For `dataset.limit(m).offset(n)`, try to push down `LIMIT (m - n) OFFSET n`.
+       // For example, for `dataset.limit(5).offset(3)`, we can push down `LIMIT 2 OFFSET 3`.
Review Comment:
This comment makes no sense here. We are pushing down operators, not pushing
down a SQL query to JDBC. The fact that `LIMIT x OFFSET y` means applying the
OFFSET first and then the LIMIT is a problem specific to JDBC.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]