andygrove commented on code in PR #2566:
URL: https://github.com/apache/arrow-datafusion/pull/2566#discussion_r876076897


##########
datafusion/core/src/optimizer/limit_push_down.rs:
##########
@@ -278,4 +306,174 @@ mod test {
 
         Ok(())
     }
+
+    #[test]
+    fn limit_pushdown_with_offset_projection_table_provider() -> Result<()> {
+        let table_scan = test_table_scan()?;
+
+        let plan = LogicalPlanBuilder::from(table_scan)
+            .project(vec![col("a")])?
+            .offset(10)?
+            .limit(1000)?
+            .build()?;
+
+        // The limit plus the offset (1000 + 10 = 1010) should be pushed down to
+        // the table provider, even when a projection sits above the scan
+        let expected = "Limit: 1000\
+        \n  Offset: 10\
+        \n    Projection: #test.a\
+        \n      TableScan: test projection=None, limit=1010";
+
+        assert_optimized_plan_eq(&plan, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn limit_pushdown_with_offset_after_limit() -> Result<()> {
+        let table_scan = test_table_scan()?;
+
+        let plan = LogicalPlanBuilder::from(table_scan)
+            .project(vec![col("a")])?
+            .limit(1000)?
+            .offset(10)?
+            .build()?;
+
+        // When the offset comes after the limit, the offset must not be added to
+        // the limit pushed down to the table provider (it stays at 1000)
+        let expected = "Offset: 10\
+        \n  Limit: 1000\
+        \n    Projection: #test.a\
+        \n      TableScan: test projection=None, limit=1000";
+
+        assert_optimized_plan_eq(&plan, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn limit_push_down_with_offset_take_smaller_limit() -> Result<()> {
+        let table_scan = test_table_scan()?;
+
+        let plan = LogicalPlanBuilder::from(table_scan)
+            .offset(10)?
+            .limit(1000)?
+            .limit(10)?
+            .build()?;
+
+        // The smallest limit plus the offset (10 + 10 = 20) should be pushed down
+        // to the table scan; this rule does not remove the redundant Limit nodes
+        let expected = "Limit: 10\
+        \n  Limit: 10\
+        \n    Offset: 10\
+        \n      TableScan: test projection=None, limit=20";
+
+        assert_optimized_plan_eq(&plan, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn limit_doesnt_push_down_with_offset_aggregation() -> Result<()> {
+        let table_scan = test_table_scan()?;
+
+        let plan = LogicalPlanBuilder::from(table_scan)
+            .aggregate(vec![col("a")], vec![max(col("b"))])?
+            .offset(10)?
+            .limit(1000)?
+            .build()?;
+
+        // The limit should *not* be pushed down past the aggregate node
+        let expected = "Limit: 1000\
+        \n  Offset: 10\
+        \n    Aggregate: groupBy=[[#test.a]], aggr=[[MAX(#test.b)]]\
+        \n      TableScan: test projection=None";
+
+        assert_optimized_plan_eq(&plan, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn limit_should_push_down_with_offset_union() -> Result<()> {
+        let table_scan = test_table_scan()?;
+
+        let plan = LogicalPlanBuilder::from(table_scan.clone())
+            .union(LogicalPlanBuilder::from(table_scan).build()?)?
+            .offset(10)?
+            .limit(1000)?
+            .build()?;
+
+        // The limit plus the offset (1010) should be pushed down through the union to each input
+        let expected = "Limit: 1000\
+        \n  Offset: 10\
+        \n    Union\
+        \n      Limit: 1010\
+        \n        TableScan: test projection=None, limit=1010\
+        \n      Limit: 1010\
+        \n        TableScan: test projection=None, limit=1010";
+
+        assert_optimized_plan_eq(&plan, expected);
+
+        Ok(())
+    }
+
+    #[test]
+    fn limit_should_push_down_with_offset_join() -> Result<()> {

Review Comment:
   ```suggestion
       fn limit_should_not_push_down_with_offset_join() -> Result<()> {
   ```
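
   The rename matches the rule's behavior: a limit cannot safely be pushed below a join, because the join can filter or multiply the rows coming from either input. For context, here is a minimal sketch of how the renamed test body might read, modeled on the other tests in this hunk; the body is truncated above, so the `test_table_scan_with_name` helper, the join keys, and the expected plan string are assumptions rather than the PR's actual code:

   ```rust
   #[test]
   fn limit_should_not_push_down_with_offset_join() -> Result<()> {
       // Hypothetical sketch only: mirrors the surrounding tests in this hunk
       let table_scan_1 = test_table_scan()?;
       let table_scan_2 = test_table_scan_with_name("test2")?;

       let plan = LogicalPlanBuilder::from(table_scan_1)
           .join(
               &LogicalPlanBuilder::from(table_scan_2).build()?,
               JoinType::Inner,
               (vec!["a"], vec!["a"]),
           )?
           .offset(10)?
           .limit(1000)?
           .build()?;

       // The limit should *not* be pushed below the join, so neither
       // TableScan receives a limit
       let expected = "Limit: 1000\
       \n  Offset: 10\
       \n    Inner Join: #test.a = #test2.a\
       \n      TableScan: test projection=None\
       \n      TableScan: test2 projection=None";

       assert_optimized_plan_eq(&plan, expected);

       Ok(())
   }
   ```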


