alamb commented on code in PR #6354:
URL: https://github.com/apache/arrow-datafusion/pull/6354#discussion_r1196940807
##########
datafusion/core/src/physical_plan/insert.rs:
##########
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Execution plan for writing data to [`DataSink`]s
+
+use super::expressions::PhysicalSortExpr;
+use super::{
+    DisplayFormatType, ExecutionPlan, Partitioning, SendableRecordBatchStream, Statistics,
+};
+use crate::error::Result;
+use arrow::datatypes::SchemaRef;
+use arrow::record_batch::RecordBatch;
+use arrow_array::{ArrayRef, UInt64Array};
+use arrow_schema::{DataType, Field, Schema};
+use async_trait::async_trait;
+use core::fmt;
+use futures::StreamExt;
+use std::any::Any;
+use std::sync::Arc;
+
+use crate::execution::context::TaskContext;
+use crate::physical_plan::stream::RecordBatchStreamAdapter;
+use crate::physical_plan::Distribution;
+use datafusion_common::DataFusionError;
+
+/// `DataSink` implements writing streams of [`RecordBatch`]es to
+/// destinations.
+#[async_trait]
+pub trait DataSink: std::fmt::Debug + Send + Sync {
+    // TODO add desired input ordering
+    // How does this sink want its input ordered?
+
+    /// Writes the data to the sink, returns the number of values written
+    ///
+    /// This method will be called exactly once during each DML
+    /// statement. Thus prior to return, the sink should do any commit
+    /// or rollback required.
+    async fn write_all(&self, data: SendableRecordBatchStream) -> Result<u64>;
+}
+
+/// Execution plan for writing record batches to a [`DataSink`]
+///
+/// Returns a single row with the number of values written
+pub struct InsertExec {
+    /// Input plan that produces the record batches to be written.
+    input: Arc<dyn ExecutionPlan>,
+    /// Sink to which to write
+    sink: Arc<dyn DataSink>,
+    /// Schema describing the structure of the data.
+    schema: SchemaRef,
+}
+
+impl fmt::Debug for InsertExec {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "InsertExec schema: {:?}", self.schema)
+    }
+}
+
+impl InsertExec {
+    /// Create a plan to write to `sink`
+    pub fn new(input: Arc<dyn ExecutionPlan>, sink: Arc<dyn DataSink>) -> Self {
+        Self {
+            input,
+            sink,
+            schema: make_count_schema(),
+        }
+    }
+}
+
+impl ExecutionPlan for InsertExec {
+    /// Return a reference to Any that can be used for downcasting
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    /// Get the schema for this execution plan
+    fn schema(&self) -> SchemaRef {
+        self.schema.clone()
+    }
+
+    fn output_partitioning(&self) -> Partitioning {
+        Partitioning::UnknownPartitioning(1)
+    }
+
+    fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
+        None
+    }
+
+    fn required_input_distribution(&self) -> Vec<Distribution> {
+        vec![Distribution::SinglePartition]
+    }
+
+    fn maintains_input_order(&self) -> Vec<bool> {
+        vec![false]

Review Comment:

I did some investigation, and I am not sure the code needs to be changed. The code in this PR gets the desired behavior of preserving the order of the input query, as can be seen in the explain plan tests (I annotated it with `****`).

This query has `ORDER BY c1`:

```sql
EXPLAIN
INSERT INTO table_without_values
SELECT
    SUM(c4) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING),
    COUNT(*) OVER(PARTITION BY c1 ORDER BY c9 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
FROM aggregate_test_100
ORDER by c1
```

The plan shows the ordering is maintained:

```
logical_plan
Dml: op=[Insert] table=[table_without_values]
--Projection: SUM(aggregate_test_100.c4) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING AS field1, COUNT(UInt8(1)) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING AS field2
----Sort: aggregate_test_100.c1 ASC NULLS LAST
------Projection: SUM(aggregate_test_100.c4) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING, COUNT(UInt8(1)) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING, aggregate_test_100.c1
--------WindowAggr: windowExpr=[[SUM(aggregate_test_100.c4) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING, COUNT(UInt8(1)) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING]]
----------TableScan: aggregate_test_100 projection=[c1, c4, c9]
physical_plan
InsertExec: sink=MemoryTable (partitions=1)
--ProjectionExec: expr=[SUM(aggregate_test_100.c4) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING@0 as field1, COUNT(UInt8(1)) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING@1 as field2]
----SortPreservingMergeExec: [c1@2 ASC NULLS LAST]
    ^******** You can see here the data remain sorted as it is fed into the InsertExec
------ProjectionExec: expr=[SUM(aggregate_test_100.c4) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING@3 as SUM(aggregate_test_100.c4), COUNT(UInt8(1)) PARTITION BY [aggregate_test_100.c1] ORDER BY [aggregate_test_100.c9 ASC NULLS LAST] ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING@4 as COUNT(UInt8(1)), c1@0 as c1]
--------BoundedWindowAggExec: wdw=[SUM(aggregate_test_100.c4): Ok(Field { name: "SUM(aggregate_test_100.c4)", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Rows, start_bound: Preceding(UInt64(1)), end_bound: Following(UInt64(1)) }, COUNT(UInt8(1)): Ok(Field { name: "COUNT(UInt8(1))", data_type: Int64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }), frame: WindowFrame { units: Rows, start_bound: Preceding(UInt64(1)), end_bound: Following(UInt64(1)) }], mode=[Sorted]
----------SortExec: expr=[c1@0 ASC NULLS LAST,c9@2 ASC NULLS LAST]
------------CoalesceBatchesExec: target_batch_size=8192
--------------RepartitionExec: partitioning=Hash([Column { name: "c1", index: 0 }], 8), input_partitions=8
----------------RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1
------------------CsvExec: file_groups={1 gr
```
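For readers unfamiliar with the new trait, here is a minimal sketch (not part of this PR) of what a `DataSink` implementation and its wiring into `InsertExec` could look like. `CountingSink` and `make_insert_plan` are made-up names, and the import paths assume the new `insert` module is exported as `datafusion::physical_plan::insert`; the trait and constructor signatures are taken from the diff above.

```rust
// Hedged sketch: a DataSink that drains its input and reports how many rows it saw.
use std::sync::Arc;

use async_trait::async_trait;
use datafusion::error::Result;
// Assumed export path for the new module added in this PR.
use datafusion::physical_plan::insert::{DataSink, InsertExec};
use datafusion::physical_plan::{ExecutionPlan, SendableRecordBatchStream};
use futures::StreamExt;

#[derive(Debug)]
struct CountingSink;

#[async_trait]
impl DataSink for CountingSink {
    // Called exactly once per DML statement: drain the stream, then commit/rollback.
    async fn write_all(&self, mut data: SendableRecordBatchStream) -> Result<u64> {
        let mut row_count = 0u64;
        while let Some(batch) = data.next().await {
            // Each stream item is a Result<RecordBatch>; propagate errors.
            row_count += batch?.num_rows() as u64;
        }
        Ok(row_count)
    }
}

// Wrap any input plan in an InsertExec that writes to the sink above.
fn make_insert_plan(input: Arc<dyn ExecutionPlan>) -> Arc<dyn ExecutionPlan> {
    Arc::new(InsertExec::new(input, Arc::new(CountingSink)))
}
```

Whatever the sink does, it sees the batches in the order the input stream produces them, which is the point the plan above illustrates: the `SortPreservingMergeExec` feeding `InsertExec` already delivers sorted data to the sink.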
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at: [email protected]
