cloud-fan commented on code in PR #36295:
URL: https://github.com/apache/spark/pull/36295#discussion_r902324439


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala:
##########
@@ -407,8 +407,69 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper wit
     case other => (other, false)
   }
 
-  def pushDownLimits(plan: LogicalPlan): LogicalPlan = plan.transform {
+  private def pushDownOffset(
+      plan: LogicalPlan,
+      offset: Int): Boolean = plan match {
+    case sHolder: ScanBuilderHolder =>
+      val isPushed = PushDownUtils.pushOffset(sHolder.builder, offset)
+      if (isPushed) {
+        sHolder.pushedOffset = Some(offset)
+      }
+      isPushed
+    case p: Project =>
+      pushDownOffset(p.child, offset)
+    case _ => false
+  }
+
+  def pushDownLimitAndOffset(plan: LogicalPlan): LogicalPlan = plan.transform {
+    case offset @ LimitAndOffset(limit, offsetValue, child) =>
+      val (newChild, canRemoveLimit) = pushDownLimit(child, limit)
+      if (canRemoveLimit) {
+        // If we can remove the limit, it means the data source has only one partition.
+        // For `dataset.limit(m).offset(n)`, try to push down `offset(n).limit(m - n)`.
+        // For example, for `dataset.limit(5).offset(3)`, we can push down `offset(3).limit(2)`.
+        val isPushed = pushDownOffset(newChild, offsetValue)
+        if (isPushed) {
+          newChild
+        } else {
+          // For `dataset.limit(m).offset(n)`, only push down `limit(m)`.
+          // Spark still does `offset(n)`.
+          offset.withNewChildren(Seq(newChild))
+        }
+      } else {
+        // If we can't remove the limit, we don't push down the offset; return the original `Offset`.
+        offset
+      }
+    case globalLimit @ OffsetAndLimit(offset, limit, child) =>
+      val (newChild, canRemoveLimit) = pushDownLimit(child, limit + offset)
+      if (canRemoveLimit) {
+        // If we can remove the limit, it means the data source has only one partition.
+        // For `dataset.offset(n).limit(m)`, try to push down `limit(m + n).offset(n)`.
+        // For example, for `dataset.offset(3).limit(5)`, we can push down `limit(8).offset(3)`.
+        val isPushed = pushDownOffset(newChild, offset)
+        if (isPushed) {
+          newChild
+        } else {
+          // For `dataset.offset(n).limit(m)`, we only push down `limit(m + n)`.
+          // Spark still does `offset(n).limit(m)`.
+          // For example, for `dataset.offset(3).limit(5)`, we push down `limit(8)` and
+          // Spark still does `offset(3).limit(5)`.
+          val newOffset = globalLimit.child.asInstanceOf[Offset].withNewChildren(Seq(newChild))
+          globalLimit.withNewChildren(Seq(newOffset))
+        }
+      } else {
+        // For `dataset.offset(n).limit(m)`, try to push down `offset(n)`.
+        // Spark still does `limit(m)`.
+        val isPushed = pushDownOffset(child, offset)
+        if (isPushed) {
+          globalLimit.withNewChildren(Seq(child))
+        } else {
+          // If we can't push down either the limit or the offset, return the original `GlobalLimit`.
+          globalLimit
+        }
+      }
     case globalLimit @ Limit(IntegerLiteral(limitValue), child) =>
+      // For `dataset.limit(m)`, try to push down `limit(m)`.

Review Comment:
   this comment doesn't seem to provide any value...
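   For context, the arithmetic behind the rewrites in the new comments can be checked with a minimal plain-Scala sketch (illustration only, not part of the PR; `rows` stands in for a single-partition source):

```scala
object LimitOffsetEquivalence extends App {
  // One partition of rows, standing in for the data source.
  val rows = (1 to 10).toSeq

  // `dataset.limit(5).offset(3)` can be pushed down as `offset(3).limit(2)`:
  // take(5).drop(3) and drop(3).take(2) both yield Seq(4, 5).
  assert(rows.take(5).drop(3) == rows.drop(3).take(2))

  // `dataset.offset(3).limit(5)` can be pushed down as `limit(8).offset(3)`:
  // drop(3).take(5) and take(8).drop(3) both yield Seq(4, 5, 6, 7, 8).
  assert(rows.drop(3).take(5) == rows.take(8).drop(3))

  println("limit/offset rewrites hold on a single partition")
}
```

   Note these equivalences only hold when the source produces a single ordered partition, which is why the offset pushdown in the rule is gated on `canRemoveLimit`.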




