alamb commented on code in PR #11652:
URL: https://github.com/apache/datafusion/pull/11652#discussion_r1693942386


##########
datafusion/sqllogictest/test_files/group_by.slt:
##########
@@ -4334,8 +4335,9 @@ physical_plan
 01)GlobalLimitExec: skip=0, fetch=5
 02)--SortPreservingMergeExec: [name@0 DESC,time_chunks@1 DESC], fetch=5
 03)----ProjectionExec: expr=[name@0 as name, date_bin(IntervalMonthDayNano { 
months: 0, days: 0, nanoseconds: 900000000000 }, ts@1) as time_chunks]
-04)------RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1
-05)--------StreamingTableExec: partition_sizes=1, projection=[name, ts], 
infinite_source=true, output_ordering=[name@0 DESC, ts@1 DESC]
+04)------LocalLimitExec: fetch=5
+05)--------RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1

Review Comment:
   If I am able to pull the CoalesceBatches logic into Repartition, I think adding 
support for limit in repartition will become quite easy (as the actual limit 
code will be reused).



##########
datafusion/core/src/physical_optimizer/limit_pushdown.rs:
##########
@@ -0,0 +1,661 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This rule reduces the amount of data transferred by pushing down limits as 
much as possible.
+
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use crate::error::Result;
+use crate::physical_optimizer::PhysicalOptimizerRule;
+use crate::physical_plan::ExecutionPlan;
+
+use datafusion_common::config::ConfigOptions;
+use datafusion_common::plan_datafusion_err;
+use datafusion_common::tree_node::{Transformed, TransformedResult, TreeNode};
+use datafusion_optimizer::push_down_limit::combine_limit;
+use datafusion_physical_plan::coalesce_partitions::CoalescePartitionsExec;
+use datafusion_physical_plan::limit::{GlobalLimitExec, LocalLimitExec};
+use 
datafusion_physical_plan::sorts::sort_preserving_merge::SortPreservingMergeExec;
+
/// This rule inspects [`ExecutionPlan`]'s and pushes down the fetch limit from
/// the parent to the child if applicable.
///
/// The rule is stateless; all work happens in
/// [`PhysicalOptimizerRule::optimize`].
#[derive(Debug, Default)]
pub struct LimitPushdown {}

impl LimitPushdown {
    /// Creates a new [`LimitPushdown`] optimizer rule instance.
    pub fn new() -> Self {
        Self {}
    }
}
+
impl PhysicalOptimizerRule for LimitPushdown {
    /// Applies limit pushdown to the entire `plan` tree.
    fn optimize(
        &self,
        plan: Arc<dyn ExecutionPlan>,
        _config: &ConfigOptions,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        // Top-down traversal: `push_down_limits` is applied to each node from
        // the root downwards, so a limit keeps descending into rewritten
        // children on subsequent visits.
        plan.transform_down(push_down_limits).data()
    }

    /// Rule name shown in optimizer diagnostics and EXPLAIN output.
    fn name(&self) -> &str {
        "LimitPushdown"
    }

    /// Returning `true` requests schema verification after the rewrite;
    /// moving/removing limit operators is not expected to alter the schema.
    fn schema_check(&self) -> bool {
        true
    }
}
+
+/// This enumeration makes `skip` and `fetch` calculations easier by providing
+/// a single API for both local and global limit operators.
+#[derive(Debug)]
+enum LimitExec {
+    Global(GlobalLimitExec),
+    Local(LocalLimitExec),
+}
+
+impl LimitExec {
+    fn input(&self) -> &Arc<dyn ExecutionPlan> {
+        match self {
+            Self::Global(global) => global.input(),
+            Self::Local(local) => local.input(),
+        }
+    }
+
+    fn fetch(&self) -> Option<usize> {
+        match self {
+            Self::Global(global) => global.fetch(),
+            Self::Local(local) => Some(local.fetch()),
+        }
+    }
+
+    fn skip(&self) -> usize {
+        match self {
+            Self::Global(global) => global.skip(),
+            Self::Local(_) => 0,
+        }
+    }
+
+    fn with_child(&self, child: Arc<dyn ExecutionPlan>) -> Self {
+        match self {
+            Self::Global(global) => {
+                Self::Global(GlobalLimitExec::new(child, global.skip(), 
global.fetch()))
+            }
+            Self::Local(local) => Self::Local(LocalLimitExec::new(child, 
local.fetch())),
+        }
+    }
+}
+
+impl From<LimitExec> for Arc<dyn ExecutionPlan> {
+    fn from(limit_exec: LimitExec) -> Self {
+        match limit_exec {
+            LimitExec::Global(global) => Arc::new(global),
+            LimitExec::Local(local) => Arc::new(local),
+        }
+    }
+}
+
+/// Pushes down the limit through the plan.
+pub fn push_down_limits(
+    plan: Arc<dyn ExecutionPlan>,
+) -> Result<Transformed<Arc<dyn ExecutionPlan>>> {
+    let maybe_modified = if let Some(limit_exec) = extract_limit(&plan) {
+        let child = limit_exec.input();
+        if let Some(child_limit) = extract_limit(child) {
+            let merged = merge_limits(&limit_exec, &child_limit);
+            // Revisit current node in case of consecutive pushdowns
+            Some(push_down_limits(merged)?.data)
+        } else if child.supports_limit_pushdown() {
+            try_push_down_limit(&limit_exec, child.clone())?
+        } else {
+            add_fetch_to_child(&limit_exec, child.clone())
+        }
+    } else {
+        None
+    };
+
+    Ok(maybe_modified.map_or(Transformed::no(plan), Transformed::yes))
+}
+
+/// Transforms the [`ExecutionPlan`] into a [`LimitExec`] if it is a
+/// [`GlobalLimitExec`] or a [`LocalLimitExec`].
+fn extract_limit(plan: &Arc<dyn ExecutionPlan>) -> Option<LimitExec> {
+    if let Some(global_limit) = 
plan.as_any().downcast_ref::<GlobalLimitExec>() {
+        Some(LimitExec::Global(GlobalLimitExec::new(
+            global_limit.input().clone(),
+            global_limit.skip(),
+            global_limit.fetch(),
+        )))
+    } else {
+        plan.as_any()
+            .downcast_ref::<LocalLimitExec>()
+            .map(|local_limit| {
+                LimitExec::Local(LocalLimitExec::new(
+                    local_limit.input().clone(),
+                    local_limit.fetch(),
+                ))
+            })
+    }
+}
+
+/// Merge the limits of the parent and the child. If at least one of them is a
+/// [`GlobalLimitExec`], the result is also a [`GlobalLimitExec`]. Otherwise,
+/// the result is a [`LocalLimitExec`].
+fn merge_limits(

Review Comment:
   Structurally, this could also be a method on `LimitExec`, though it is 
totally fine as-is.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscribe@datafusion.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: github-unsubscribe@datafusion.apache.org
For additional commands, e-mail: github-help@datafusion.apache.org

Reply via email to