2010YOUY01 commented on code in PR #16660:
URL: https://github.com/apache/datafusion/pull/16660#discussion_r2291635405


##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;

Review Comment:
   I recommend using the `batch_size` configuration from the context



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evalute this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`].:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only support range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed' side 
and the left outputs as the
+/// 'buffered' side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are made magnitudes of times faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+///          Sorted Buffered Side             Unsorted Streamed Side
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       500        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       200        │        3 │       300        │
+///            ├──────────────────┤          └──────────────────┘
+///          4 │       300        │ ─┐       
+///            ├──────────────────┤  | We emit matches for row 4 - 5
+///          5 │       400        │ ─┘ on the buffered side.
+///            └──────────────────┘
+///             min value: 200
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt` (<) or
+/// `Operator::LtEq` (<=) and descending for `Operator::Gt` (>) or 
`Operator::GtEq` (>=).
+///
+/// ## Assumptions / Notation
+/// - \[R\], \[S\]: number of pages (blocks) of `R` and `S`
+/// - |R|, |S|: number of tuples in `R` and `S`
+/// - `B`: number of buffer pages
+///
+/// # Performance (cost)
+/// Piecewise Merge Join is used over Nested Loop Join due to its superior 
performance. Here is a breakdown
+/// of the calculations:
+///
+/// ## Piecewise Merge Join (PWMJ)
+/// Intuition: Keep the buffered side (`R`) sorted and in memory (or scan it 
in sorted order),
+/// sort the streamed side (`S`), then merge in order while advancing a pivot 
on `R`.
+///
+/// Average I/O cost:
+///   `cost(PWMJ) = sort(R) + sort(S) + (\[R\] + \[S\])`
+///
+///   - If `R` (buffered) already sorted on the join key:     `cost(PWMJ) = 
sort(S) + \[R\] + \[S\]`
+///   - If `S` already sorted and `R` not:                    `cost(PWMJ) = 
sort(R) + \[R\] + \[S\]`
+///   - If both already sorted:                               `cost(PWMJ) = 
\[R\] + \[S\]`
+///
+/// ## Nested Loop Join
+///   `cost(NLJ) ≈ \[R\] + |R|·\[S\]`
+///
+/// Takeaway:
+///   - When at least one side needs sorting, PWMJ ≈ `sort(R) + sort(S) + 
\[R\] + \[S\]` on average,
+///     typically beating NLJ’s `|R|·\[S\]` (or its buffered variant) for 
nontrivial `|R|`, `\[S\]`.
+///
+/// # Further Reference Material
+/// DuckDB blog on Range Joins: [Range Joins in 
DuckDB](https://duckdb.org/2022/05/27/iejoin.html)
+#[derive(Debug)]
+pub struct PiecewiseMergeJoinExec {
+    /// Left buffered execution plan
+    pub buffered: Arc<dyn ExecutionPlan>,
+    /// Right streamed execution plan
+    pub streamed: Arc<dyn ExecutionPlan>,
+    /// The two expressions being compared
+    pub on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+    /// Comparison operator in the range predicate
+    pub operator: Operator,
+    /// How the join is performed
+    pub join_type: JoinType,
+    /// The schema once the join is applied
+    schema: SchemaRef,
+    /// Buffered data
+    buffered_fut: OnceAsync<BufferedSideData>,
+    /// Execution metrics
+    metrics: ExecutionPlanMetricsSet,
+    /// The left SortExpr
+    left_sort_exprs: LexOrdering,
+    /// The right SortExpr
+    right_sort_exprs: LexOrdering,
+    /// Sort options of join columns used in sorting the stream and buffered 
execution plans
+    sort_options: SortOptions,
+    /// Cache holding plan properties like equivalences, output partitioning 
etc.
+    cache: PlanProperties,
+}
+
+impl PiecewiseMergeJoinExec {
+    pub fn try_new(
+        buffered: Arc<dyn ExecutionPlan>,
+        streamed: Arc<dyn ExecutionPlan>,
+        on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+        operator: Operator,
+        join_type: JoinType,
+    ) -> Result<Self> {
+        // TODO: Implement mark joins for PiecewiseMergeJoin
+        if matches!(join_type, JoinType::LeftMark | JoinType::RightMark) {
+            return not_impl_err!(
+                "Mark Joins are currently not supported for PiecewiseMergeJoin"
+            );
+        }
+
+        // Take the operator and enforce a sort order on the streamed + 
buffered side based on
+        // the operator type.
+        let sort_options = match operator {
+            Operator::Lt | Operator::LtEq => {
+                // For left existence joins the inputs will be swapped so the 
sort
+                // options are switched
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(false, false)
+                } else {
+                    SortOptions::new(true, false)
+                }
+            }
+            Operator::Gt | Operator::GtEq => {
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(true, false)
+                } else {
+                    SortOptions::new(false, false)
+                }
+            }
+            _ => {
+                return plan_err!(

Review Comment:
   `internal_err`? The planner should guarantee that a valid operator is 
provided, so reaching this branch would indicate a potential bug. Alternatively, 
we could create a new enum type to be safer.



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evalute this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`].:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only support range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed' side 
and the left outputs as the
+/// 'buffered' side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are made magnitudes of times faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+///          Sorted Buffered Side             Unsorted Streamed Side
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       500        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       200        │        3 │       300        │
+///            ├──────────────────┤          └──────────────────┘
+///          4 │       300        │ ─┐       
+///            ├──────────────────┤  | We emit matches for row 4 - 5
+///          5 │       400        │ ─┘ on the buffered side.
+///            └──────────────────┘
+///             min value: 200
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt` (<) or
+/// `Operator::LtEq` (<=) and descending for `Operator::Gt` (>) or 
`Operator::GtEq` (>=).
+///
+/// ## Assumptions / Notation
+/// - \[R\], \[S\]: number of pages (blocks) of `R` and `S`
+/// - |R|, |S|: number of tuples in `R` and `S`
+/// - `B`: number of buffer pages
+///
+/// # Performance (cost)
+/// Piecewise Merge Join is used over Nested Loop Join due to its superior 
performance. Here is a breakdown
+/// of the calculations:
+///
+/// ## Piecewise Merge Join (PWMJ)
+/// Intuition: Keep the buffered side (`R`) sorted and in memory (or scan it 
in sorted order),
+/// sort the streamed side (`S`), then merge in order while advancing a pivot 
on `R`.
+///
+/// Average I/O cost:
+///   `cost(PWMJ) = sort(R) + sort(S) + (\[R\] + \[S\])`
+///
+///   - If `R` (buffered) already sorted on the join key:     `cost(PWMJ) = 
sort(S) + \[R\] + \[S\]`
+///   - If `S` already sorted and `R` not:                    `cost(PWMJ) = 
sort(R) + \[R\] + \[S\]`
+///   - If both already sorted:                               `cost(PWMJ) = 
\[R\] + \[S\]`
+///
+/// ## Nested Loop Join
+///   `cost(NLJ) ≈ \[R\] + |R|·\[S\]`
+///
+/// Takeaway:
+///   - When at least one side needs sorting, PWMJ ≈ `sort(R) + sort(S) + 
\[R\] + \[S\]` on average,
+///     typically beating NLJ’s `|R|·\[S\]` (or its buffered variant) for 
nontrivial `|R|`, `\[S\]`.
+///
+/// # Further Reference Material
+/// DuckDB blog on Range Joins: [Range Joins in 
DuckDB](https://duckdb.org/2022/05/27/iejoin.html)
+#[derive(Debug)]
+pub struct PiecewiseMergeJoinExec {
+    /// Left buffered execution plan
+    pub buffered: Arc<dyn ExecutionPlan>,
+    /// Right streamed execution plan
+    pub streamed: Arc<dyn ExecutionPlan>,
+    /// The two expressions being compared
+    pub on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+    /// Comparison operator in the range predicate
+    pub operator: Operator,
+    /// How the join is performed
+    pub join_type: JoinType,
+    /// The schema once the join is applied
+    schema: SchemaRef,
+    /// Buffered data
+    buffered_fut: OnceAsync<BufferedSideData>,
+    /// Execution metrics
+    metrics: ExecutionPlanMetricsSet,
+    /// The left SortExpr
+    left_sort_exprs: LexOrdering,
+    /// The right SortExpr
+    right_sort_exprs: LexOrdering,
+    /// Sort options of join columns used in sorting the stream and buffered 
execution plans
+    sort_options: SortOptions,
+    /// Cache holding plan properties like equivalences, output partitioning 
etc.
+    cache: PlanProperties,
+}
+
+impl PiecewiseMergeJoinExec {
+    pub fn try_new(
+        buffered: Arc<dyn ExecutionPlan>,
+        streamed: Arc<dyn ExecutionPlan>,
+        on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+        operator: Operator,
+        join_type: JoinType,
+    ) -> Result<Self> {
+        // TODO: Implement mark joins for PiecewiseMergeJoin
+        if matches!(join_type, JoinType::LeftMark | JoinType::RightMark) {
+            return not_impl_err!(
+                "Mark Joins are currently not supported for PiecewiseMergeJoin"
+            );
+        }
+
+        // Take the operator and enforce a sort order on the streamed + 
buffered side based on
+        // the operator type.
+        let sort_options = match operator {
+            Operator::Lt | Operator::LtEq => {
+                // For left existence joins the inputs will be swapped so the 
sort
+                // options are switched
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(false, false)
+                } else {
+                    SortOptions::new(true, false)
+                }
+            }
+            Operator::Gt | Operator::GtEq => {
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(true, false)
+                } else {
+                    SortOptions::new(false, false)
+                }
+            }
+            _ => {
+                return plan_err!(
+                    "Cannot contain non-range operator in 
PiecewiseMergeJoinExec"
+                )
+            }
+        };
+
+        // Give the same `sort_option for comparison later`
+        let left_sort_exprs =
+            vec![PhysicalSortExpr::new(Arc::clone(&on.0), sort_options)];
+        let right_sort_exprs =
+            vec![PhysicalSortExpr::new(Arc::clone(&on.1), sort_options)];
+
+        let Some(left_sort_exprs) = LexOrdering::new(left_sort_exprs) else {
+            return plan_err!(
+                "PiecewiseMergeJoinExec requires valid sort expressions for 
its left side"
+            );
+        };
+        let Some(right_sort_exprs) = LexOrdering::new(right_sort_exprs) else {
+            return plan_err!(
+                "PiecewiseMergeJoinExec requires valid sort expressions for 
its right side"
+            );
+        };
+
+        let buffered_schema = buffered.schema();
+        let streamed_schema = streamed.schema();
+
+        // Create output schema for the join
+        let schema =
+            Arc::new(build_join_schema(&buffered_schema, &streamed_schema, 
&join_type).0);
+        let cache = Self::compute_properties(
+            &buffered,
+            &streamed,
+            Arc::clone(&schema),
+            join_type,
+            &on,
+        )?;
+
+        Ok(Self {
+            streamed,
+            buffered,
+            on,
+            operator,
+            join_type,
+            schema,
+            buffered_fut: Default::default(),
+            metrics: ExecutionPlanMetricsSet::new(),
+            left_sort_exprs,
+            right_sort_exprs,
+            sort_options,
+            cache,
+        })
+    }
+
+    /// Refeerence to buffered side execution plan
+    pub fn buffered(&self) -> &Arc<dyn ExecutionPlan> {
+        &self.buffered
+    }
+
+    /// Refeference to streamed side execution plan
+    pub fn streamed(&self) -> &Arc<dyn ExecutionPlan> {
+        &self.streamed
+    }
+
+    /// Join type
+    pub fn join_type(&self) -> JoinType {
+        self.join_type
+    }
+
+    /// Reference to sort options
+    pub fn sort_options(&self) -> &SortOptions {
+        &self.sort_options
+    }
+
+    /// Get probe side (streamed side) for the PiecewiseMergeJoin
+    /// In current implementation, probe side is determined according to join 
type.
+    pub fn probe_side(join_type: &JoinType) -> JoinSide {
+        match join_type {
+            JoinType::Right
+            | JoinType::Inner
+            | JoinType::Full
+            | JoinType::RightSemi
+            | JoinType::RightAnti
+            | JoinType::RightMark => JoinSide::Right,
+            JoinType::Left
+            | JoinType::LeftAnti
+            | JoinType::LeftSemi
+            | JoinType::LeftMark => JoinSide::Left,
+        }
+    }
+
+    pub fn compute_properties(
+        buffered: &Arc<dyn ExecutionPlan>,
+        streamed: &Arc<dyn ExecutionPlan>,
+        schema: SchemaRef,
+        join_type: JoinType,
+        join_on: &(PhysicalExprRef, PhysicalExprRef),
+    ) -> Result<PlanProperties> {
+        let eq_properties = join_equivalence_properties(
+            buffered.equivalence_properties().clone(),
+            streamed.equivalence_properties().clone(),
+            &join_type,
+            schema,
+            &Self::maintains_input_order(join_type),
+            Some(Self::probe_side(&join_type)),
+            std::slice::from_ref(join_on),
+        )?;
+
+        let output_partitioning =
+            symmetric_join_output_partitioning(buffered, streamed, 
&join_type)?;
+
+        Ok(PlanProperties::new(
+            eq_properties,
+            output_partitioning,
+            EmissionType::Incremental,
+            boundedness_from_children([buffered, streamed]),
+        ))
+    }
+
+    fn maintains_input_order(join_type: JoinType) -> Vec<bool> {

Review Comment:
   Can we leave this as a follow-up and do `vec![false, false]` for now? I think 
it requires a bit more testing, and I also noticed an internal side swap 
operation that makes it hard to reason about.



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`].:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only supports range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed` side 
and the left inputs as the
+/// `buffered` side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are orders of magnitude faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+///          Sorted Buffered Side             Unsorted Streamed Side
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       500        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       200        │        3 │       300        │
+///            ├──────────────────┤          └──────────────────┘
+///          4 │       300        │ ─┐       
+///            ├──────────────────┤  | We emit matches for row 4 - 5
+///          5 │       400        │ ─┘ on the buffered side.
+///            └──────────────────┘
+///             min value: 200
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt` (<) or
+/// `Operator::LtEq` (<=) and descending for `Operator::Gt` (>) or 
`Operator::GtEq` (>=).
+///
+/// ## Assumptions / Notation
+/// - \[R\], \[S\]: number of pages (blocks) of `R` and `S`
+/// - |R|, |S|: number of tuples in `R` and `S`
+/// - `B`: number of buffer pages
+///
+/// # Performance (cost)
+/// Piecewise Merge Join is used over Nested Loop Join due to its superior 
performance. Here is a breakdown
+/// of the calculations:
+///
+/// ## Piecewise Merge Join (PWMJ)
+/// Intuition: Keep the buffered side (`R`) sorted and in memory (or scan it 
in sorted order),
+/// sort the streamed side (`S`), then merge in order while advancing a pivot 
on `R`.
+///
+/// Average I/O cost:
+///   `cost(PWMJ) = sort(R) + sort(S) + (\[R\] + \[S\])`
+///
+///   - If `R` (buffered) already sorted on the join key:     `cost(PWMJ) = 
sort(S) + \[R\] + \[S\]`
+///   - If `S` already sorted and `R` not:                    `cost(PWMJ) = 
sort(R) + \[R\] + \[S\]`
+///   - If both already sorted:                               `cost(PWMJ) = 
\[R\] + \[S\]`
+///
+/// ## Nested Loop Join
+///   `cost(NLJ) ≈ \[R\] + |R|·\[S\]`
+///
+/// Takeaway:
+///   - When at least one side needs sorting, PWMJ ≈ `sort(R) + sort(S) + 
\[R\] + \[S\]` on average,
+///     typically beating NLJ’s `|R|·\[S\]` (or its buffered variant) for 
nontrivial `|R|`, `\[S\]`.
+///
+/// # Further Reference Material
+/// DuckDB blog on Range Joins: [Range Joins in 
DuckDB](https://duckdb.org/2022/05/27/iejoin.html)
#[derive(Debug)]
pub struct PiecewiseMergeJoinExec {
    /// Left child plan; this side is collected ("buffered") before probing
    pub buffered: Arc<dyn ExecutionPlan>,
    /// Right child plan; this side is processed incrementally ("streamed")
    pub streamed: Arc<dyn ExecutionPlan>,
    /// The two expressions compared by the range predicate,
    /// as (left/buffered expression, right/streamed expression)
    pub on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
    /// Comparison operator of the range predicate
    /// (one of `<`, `<=`, `>`, `>=`; enforced in `try_new`)
    pub operator: Operator,
    /// How the join is performed (Inner, Left, RightSemi, ...)
    pub join_type: JoinType,
    /// The schema of the output once the join is applied
    schema: SchemaRef,
    /// Shared future resolving to the collected buffered-side data,
    /// so the buffered side is materialized at most once across partitions
    buffered_fut: OnceAsync<BufferedSideData>,
    /// Execution metrics
    metrics: ExecutionPlanMetricsSet,
    /// Required sort ordering for the left (buffered) input
    left_sort_exprs: LexOrdering,
    /// Required sort ordering for the right (streamed) input
    right_sort_exprs: LexOrdering,
    /// Sort options (direction / null placement) of the join columns, used
    /// in sorting both the streamed and buffered execution plans
    sort_options: SortOptions,
    /// Cache holding plan properties like equivalences, output partitioning
    /// etc.
    cache: PlanProperties,
}
+
+impl PiecewiseMergeJoinExec {
+    pub fn try_new(
+        buffered: Arc<dyn ExecutionPlan>,
+        streamed: Arc<dyn ExecutionPlan>,
+        on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+        operator: Operator,
+        join_type: JoinType,
+    ) -> Result<Self> {
+        // TODO: Implement mark joins for PiecewiseMergeJoin
+        if matches!(join_type, JoinType::LeftMark | JoinType::RightMark) {
+            return not_impl_err!(
+                "Mark Joins are currently not supported for PiecewiseMergeJoin"
+            );
+        }
+
+        // Take the operator and enforce a sort order on the streamed + 
buffered side based on
+        // the operator type.
+        let sort_options = match operator {
+            Operator::Lt | Operator::LtEq => {
+                // For left existence joins the inputs will be swapped so the 
sort
+                // options are switched
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(false, false)

Review Comment:
   Here it defines a null order; I think those nulls should be specially 
handled later — let's add a comment pointing to the location where those nulls 
are handled.



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`].:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only supports range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed' side 
and the left outputs as the
+/// 'buffered' side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are orders of magnitude faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+///          Sorted Buffered Side             Unsorted Streamed Side
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       500        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       200        │        3 │       300        │
+///            ├──────────────────┤          └──────────────────┘
+///          4 │       300        │ ─┐       
+///            ├──────────────────┤  | We emit matches for row 4 - 5
+///          5 │       400        │ ─┘ on the buffered side.
+///            └──────────────────┘
+///             min value: 200
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt` (<) or
+/// `Operator::LtEq` (<=) and descending for `Operator::Gt` (>) or 
`Operator::GtEq` (>=).
+///
+/// ## Assumptions / Notation
+/// - \[R\], \[S\]: number of pages (blocks) of `R` and `S`
+/// - |R|, |S|: number of tuples in `R` and `S`
+/// - `B`: number of buffer pages
+///
+/// # Performance (cost)
+/// Piecewise Merge Join is used over Nested Loop Join due to its superior 
performance. Here is a breakdown
+/// of the calculations:
+///
+/// ## Piecewise Merge Join (PWMJ)
+/// Intuition: Keep the buffered side (`R`) sorted and in memory (or scan it 
in sorted order),
+/// sort the streamed side (`S`), then merge in order while advancing a pivot 
on `R`.
+///
+/// Average I/O cost:
+///   `cost(PWMJ) = sort(R) + sort(S) + (\[R\] + \[S\])`
+///
+///   - If `R` (buffered) already sorted on the join key:     `cost(PWMJ) = 
sort(S) + \[R\] + \[S\]`
+///   - If `S` already sorted and `R` not:                    `cost(PWMJ) = 
sort(R) + \[R\] + \[S\]`
+///   - If both already sorted:                               `cost(PWMJ) = 
\[R\] + \[S\]`
+///
+/// ## Nested Loop Join
+///   `cost(NLJ) ≈ \[R\] + |R|·\[S\]`
+///
+/// Takeaway:
+///   - When at least one side needs sorting, PWMJ ≈ `sort(R) + sort(S) + 
\[R\] + \[S\]` on average,
+///     typically beating NLJ’s `|R|·\[S\]` (or its buffered variant) for 
nontrivial `|R|`, `\[S\]`.
+///
+/// # Further Reference Material
+/// DuckDB blog on Range Joins: [Range Joins in 
DuckDB](https://duckdb.org/2022/05/27/iejoin.html)
+#[derive(Debug)]
+pub struct PiecewiseMergeJoinExec {
+    /// Left buffered execution plan
+    pub buffered: Arc<dyn ExecutionPlan>,
+    /// Right streamed execution plan
+    pub streamed: Arc<dyn ExecutionPlan>,
+    /// The two expressions being compared
+    pub on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+    /// Comparison operator in the range predicate
+    pub operator: Operator,
+    /// How the join is performed
+    pub join_type: JoinType,
+    /// The schema once the join is applied
+    schema: SchemaRef,
+    /// Buffered data
+    buffered_fut: OnceAsync<BufferedSideData>,
+    /// Execution metrics
+    metrics: ExecutionPlanMetricsSet,
+    /// The left SortExpr
+    left_sort_exprs: LexOrdering,
+    /// The right SortExpr
+    right_sort_exprs: LexOrdering,
+    /// Sort options of join columns used in sorting the stream and buffered 
execution plans
+    sort_options: SortOptions,
+    /// Cache holding plan properties like equivalences, output partitioning 
etc.
+    cache: PlanProperties,
+}
+
+impl PiecewiseMergeJoinExec {
+    pub fn try_new(
+        buffered: Arc<dyn ExecutionPlan>,
+        streamed: Arc<dyn ExecutionPlan>,
+        on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+        operator: Operator,
+        join_type: JoinType,
+    ) -> Result<Self> {
+        // TODO: Implement mark joins for PiecewiseMergeJoin
+        if matches!(join_type, JoinType::LeftMark | JoinType::RightMark) {
+            return not_impl_err!(
+                "Mark Joins are currently not supported for PiecewiseMergeJoin"
+            );
+        }
+
+        // Take the operator and enforce a sort order on the streamed + 
buffered side based on
+        // the operator type.
+        let sort_options = match operator {
+            Operator::Lt | Operator::LtEq => {
+                // For left existence joins the inputs will be swapped so the 
sort
+                // options are switched
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(false, false)
+                } else {
+                    SortOptions::new(true, false)
+                }
+            }
+            Operator::Gt | Operator::GtEq => {
+                if is_right_existence_join(join_type) {
+                    SortOptions::new(true, false)
+                } else {
+                    SortOptions::new(false, false)
+                }
+            }
+            _ => {
+                return plan_err!(
+                    "Cannot contain non-range operator in 
PiecewiseMergeJoinExec"
+                )
+            }
+        };
+
+        // Use the same `sort_options` for comparison later
+        let left_sort_exprs =
+            vec![PhysicalSortExpr::new(Arc::clone(&on.0), sort_options)];
+        let right_sort_exprs =
+            vec![PhysicalSortExpr::new(Arc::clone(&on.1), sort_options)];
+
+        let Some(left_sort_exprs) = LexOrdering::new(left_sort_exprs) else {
+            return plan_err!(

Review Comment:
   also `internal_err`?



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`]:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only supports range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed` side 
and the left inputs as the
+/// `buffered` side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know we can start probing from the previous row's match.
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are made orders of magnitude faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (500), (200), (300)) AS streamed(a)

Review Comment:
   The rows here are not consistent with the figure



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`]:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only supports range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed` side 
and the left inputs as the
+/// `buffered` side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know we can start probing from the previous row's match.
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are made orders of magnitude faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (500), (200), (300)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+///          Sorted Buffered Side             Unsorted Streamed Side
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       500        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       200        │        3 │       300        │
+///            ├──────────────────┤          └──────────────────┘
+///          4 │       300        │ ─┐       
+///            ├──────────────────┤  | We emit matches for row 4 - 5
+///          5 │       400        │ ─┘ on the buffered side.
+///            └──────────────────┘
+///             min value: 200
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt` (<) or
+/// `Operator::LtEq` (<=) and descending for `Operator::Gt` (>) or 
`Operator::GtEq` (>=).
+///
+/// ## Assumptions / Notation
+/// - \[R\], \[S\]: number of pages (blocks) of `R` and `S`
+/// - |R|, |S|: number of tuples in `R` and `S`
+/// - `B`: number of buffer pages
+///
+/// # Performance (cost)
+/// Piecewise Merge Join is used over Nested Loop Join due to its superior 
performance. Here is a breakdown
+/// of the calculations:
+///
+/// ## Piecewise Merge Join (PWMJ)
+/// Intuition: Keep the buffered side (`R`) sorted and in memory (or scan it 
in sorted order),
+/// sort the streamed side (`S`), then merge in order while advancing a pivot 
on `R`.
+///
+/// Average I/O cost:
+///   `cost(PWMJ) = sort(R) + sort(S) + (\[R\] + \[S\])`
+///
+///   - If `R` (buffered) already sorted on the join key:     `cost(PWMJ) = 
sort(S) + \[R\] + \[S\]`
+///   - If `S` already sorted and `R` not:                    `cost(PWMJ) = 
sort(R) + \[R\] + \[S\]`
+///   - If both already sorted:                               `cost(PWMJ) = 
\[R\] + \[S\]`
+///
+/// ## Nested Loop Join
+///   `cost(NLJ) ≈ \[R\] + |R|·\[S\]`
+///
+/// Takeaway:
+///   - When at least one side needs sorting, PWMJ ≈ `sort(R) + sort(S) + 
\[R\] + \[S\]` on average,
+///     typically beating NLJ’s `|R|·\[S\]` (or its buffered variant) for 
nontrivial `|R|`, `\[S\]`.
+///
+/// # Further Reference Material
+/// DuckDB blog on Range Joins: [Range Joins in 
DuckDB](https://duckdb.org/2022/05/27/iejoin.html)
+#[derive(Debug)]
+pub struct PiecewiseMergeJoinExec {
+    /// Left buffered execution plan
+    pub buffered: Arc<dyn ExecutionPlan>,
+    /// Right streamed execution plan
+    pub streamed: Arc<dyn ExecutionPlan>,
+    /// The two expressions being compared
+    pub on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
+    /// Comparison operator in the range predicate
+    pub operator: Operator,
+    /// How the join is performed
+    pub join_type: JoinType,
+    /// The schema once the join is applied
+    schema: SchemaRef,
+    /// Buffered data
+    buffered_fut: OnceAsync<BufferedSideData>,
+    /// Execution metrics
+    metrics: ExecutionPlanMetricsSet,
+    /// The left SortExpr

Review Comment:
   I think we could also add more precise definitions to those two sort exprs, 
like in the top-level comments.



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`]:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only supports range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed` side 
and the left inputs as the
+/// `buffered` side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to

Review Comment:
   I think we should describe more in detail:
   1. How is the sort order decided -- I haven't check the impl yet, but I 
guess it's using the comparison operator (like `>`) to decide an order, and 
later it only requires scan both side from index `0` to `len-1` to finish the 
join?
   2. How to get such sort order: I think for buffer side it's inserting an 
SortExec outside this operator during planning; for probe side it's when 
reading in new batch, sort it directly?



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`]:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only supports range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed` side 
and the left inputs as the
+/// `buffered` side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+/// Processing Row 1:
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ ─┐                                    2 │       
200        │        
+///       ├──────────────────┤  │  For row 1 on streamed side with     
├──────────────────┤         
+///     3 │       200        │  │  value 100, we emit rows 2 - 5.    3 │       
500        │       
+///       ├──────────────────┤  │  as matches when the operator is     
└──────────────────┘
+///     4 │       300        │  │  `Operator::Lt` (<) Emitting all
+///       ├──────────────────┤  │  rows after the first match (row
+///     5 │       400        │ ─┘  2 buffered side; 100 < 200)
+///       └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know the buffered-side pointer can resume from the previous row's match.
+///
+///       Sorted Buffered Side                                         Sorted 
Streamed Side          
+///       ┌──────────────────┐                                         
┌──────────────────┐         
+///     1 │       100        │                                       1 │       
100        │        
+///       ├──────────────────┤                                         
├──────────────────┤         
+///     2 │       200        │ <- Start here when probing for the    2 │       
200        │        
+///       ├──────────────────┤    streamed side row 2.                 
├──────────────────┤         
+///     3 │       200        │                                       3 │       
500        │       
+///       ├──────────────────┤                                         
└──────────────────┘
+///     4 │       300        │  
+///       ├──────────────────┤  
+///     5 │       400        │
+///       └──────────────────┘     
+///
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are made orders of magnitude faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+///
+///          Sorted Buffered Side             Unsorted Streamed Side
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       500        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       200        │        3 │       300        │
+///            ├──────────────────┤          └──────────────────┘
+///          4 │       300        │ ─┐       
+///            ├──────────────────┤  | We emit matches for row 4 - 5
+///          5 │       400        │ ─┘ on the buffered side.
+///            └──────────────────┘
+///             min value: 200
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt` (<) or
+/// `Operator::LtEq` (<=) and descending for `Operator::Gt` (>) or 
`Operator::GtEq` (>=).
+///
+/// ## Assumptions / Notation
+/// - \[R\], \[S\]: number of pages (blocks) of `R` and `S`
+/// - |R|, |S|: number of tuples in `R` and `S`
+/// - `B`: number of buffer pages
+///
+/// # Performance (cost)

Review Comment:
   I think this cost model is not correct -- In ancient times because disk are 
super slow, so the number of page fetches is used to model the performance. 
However today for OLAP systems, the bottleneck has shifted to the CPU, so I 
think it's better to use the work done by CPU to model the performance.
   e.g.
   NLJ cost = buffer-side-scan * probe-side-row-count
   PWMJ cost = buffer-side-scan * probe-side-batch-count



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2909 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{
+    new_null_array, Array, PrimitiveArray, PrimitiveBuilder, 
RecordBatchOptions,
+};
+use arrow::compute::take;
+use arrow::datatypes::{UInt32Type, UInt64Type};
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_common::{not_impl_err, NullEquality};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// Batch emits this number of rows when processing
+pub const DEFAULT_INCREMENTAL_BATCH_VALUE: usize = 8192;
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`].:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only support range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all right inputs as the `streamed' side 
and the left outputs as the
+/// 'buffered' side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///

Review Comment:
   Can we add a quick pseudo code for the algorithm, like
   ```python
   for stream_row in stream_batch:
       for buffer_row in buffer_batch:
           if compare(stream_row, probe_row):
               output stream_row X buffer_batch[buffer_row:]
           else:
               continue
   ```
   And also explain why stream side is in the outer loop



##########
datafusion/physical-plan/src/joins/piecewise_merge_join.rs:
##########
@@ -0,0 +1,2071 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{new_null_array, Array, RecordBatchOptions};
+use arrow::compute::take;
+use arrow::{
+    array::{
+        ArrayRef, BooleanBufferBuilder, RecordBatch, UInt32Array, 
UInt32Builder,
+        UInt64Array, UInt64Builder,
+    },
+    compute::{concat_batches, sort_to_indices, take_record_batch},
+    util::bit_util,
+};
+use arrow_schema::{ArrowError, Schema, SchemaRef, SortOptions};
+use datafusion_common::NullEquality;
+use datafusion_common::{
+    exec_err, internal_err, plan_err, utils::compare_rows, JoinSide, Result, 
ScalarValue,
+};
+use datafusion_execution::{
+    memory_pool::{MemoryConsumer, MemoryReservation},
+    RecordBatchStream, SendableRecordBatchStream,
+};
+use datafusion_expr::{JoinType, Operator};
+use datafusion_functions_aggregate_common::min_max::{max_batch, min_batch};
+use datafusion_physical_expr::equivalence::join_equivalence_properties;
+use datafusion_physical_expr::{
+    LexOrdering, OrderingRequirements, PhysicalExpr, PhysicalExprRef, 
PhysicalSortExpr,
+};
+use datafusion_physical_expr_common::physical_expr::fmt_sql;
+use futures::{Stream, StreamExt, TryStreamExt};
+use parking_lot::Mutex;
+use std::fmt::Formatter;
+use std::{cmp::Ordering, task::ready};
+use std::{sync::Arc, task::Poll};
+
+use crate::execution_plan::{boundedness_from_children, EmissionType};
+
+use crate::joins::sort_merge_join::compare_join_arrays;
+use crate::joins::utils::{
+    get_final_indices_from_shared_bitmap, symmetric_join_output_partitioning,
+};
+use crate::{handle_state, DisplayAs, DisplayFormatType, 
ExecutionPlanProperties};
+use crate::{
+    joins::{
+        utils::{
+            build_join_schema, BuildProbeJoinMetrics, OnceAsync, OnceFut,
+            StatefulStreamResult,
+        },
+        SharedBitmapBuilder,
+    },
+    metrics::ExecutionPlanMetricsSet,
+    spill::get_record_batch_memory_size,
+    ExecutionPlan, PlanProperties,
+};
+
+/// `PiecewiseMergeJoinExec` is a join execution plan that only evaluates 
single range filter.
+///
+/// The physical planner will choose to evaluate this join when there is only 
one range predicate. This
+/// is a binary expression which contains [`Operator::Lt`], 
[`Operator::LtEq`], [`Operator::Gt`], and
+/// [`Operator::GtEq`].:
+/// Examples:
+///  - `col0` < `colb`, `col0` <= `colb`, `col0` > `colb`, `col0` >= `colb`
+///
+/// Since the join only support range predicates, equijoins are not supported 
in `PiecewiseMergeJoinExec`,
+/// however you can first evaluate another join and run 
`PiecewiseMergeJoinExec` if left with one range
+/// predicate.
+///
+/// # Execution Plan Inputs
+/// For `PiecewiseMergeJoin` we label all left inputs as the `streamed' side 
and the right outputs as the
+/// 'buffered' side.
+///
+/// `PiecewiseMergeJoin` takes a sorted input for the side to be buffered and 
is able to sort streamed record
+/// batches during processing. Sorted input must specifically be 
ascending/descending based on the operator.
+///
+/// # Algorithms
+/// Classic joins are processed differently compared to existence joins.
+///
+/// ## Classic Joins (Inner, Full, Left, Right)
+/// For classic joins we buffer the right side (buffered), and incrementally 
process the left side (streamed).
+/// Every streamed batch is sorted so we can perform a sort merge algorithm. 
For the buffered side we want to
+/// have it already sorted either ascending or descending based on the 
operator as this allows us to emit all
+/// the rows from a given point to the end as matches. Sorting the streamed 
side allows us to start the pointer
+/// from the previous row's match on the buffered side.
+///
+/// Here is an example:
+///
+/// We perform a `JoinType::Left` with these two batches and the operator 
being `Operator::Lt`(<). For each
+/// row on the streamed side we move a pointer on the buffered until it 
matches the condition. Once we reach
+/// the row which matches (in this case with row 1 on streamed will have its 
first match on row 2 on
+/// buffered; 100 < 200 is true), we can emit all rows after that match. We 
can emit the rows like this because
+/// if the batch is sorted in ascending order, every subsequent row will also 
satisfy the condition as they will
+/// all be larger values.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+/// 
+/// Processing Row 1:
+///
+///            Sorted Streamed Side          Sorted Buffered Side  
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       100        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │ ─┐  
+///            ├──────────────────┤          ├──────────────────┤  │  For row 
1 on streamed side with
+///          3 │       500        │        3 │       200        │  │  value 
100, we emit rows 2 - 5
+///            └──────────────────┘          ├──────────────────┤  │  as 
matches when the operator is
+///                                        4 │       300        │  │  
`Operator::Lt` (<) Emitting all
+///                                          ├──────────────────┤  │  rows 
after the first match (row 2
+///                                        5 │       400        │ ─┘  buffered 
side; 100 < 200)
+///                                          └──────────────────┘     
+///
+/// Processing Row 2:
+///   By sorting the streamed side we know the buffered-side pointer can resume from the previous row's match.
+///
+///            Sorted Streamed Side          Sorted Buffered Side  
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       100        │        1 │       100        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │ <- Start 
here when probing for the streamed
+///            ├──────────────────┤          ├──────────────────┤    side row 
2.
+///          3 │       500        │        3 │       200        │
+///            └──────────────────┘          ├──────────────────┤
+///                                        4 │       300        │
+///                                          ├──────────────────┤
+///                                        5 │       400        |
+///                                          └──────────────────┘
+/// ```
+///
+/// ## Existence Joins (Semi, Anti, Mark)
+/// Existence joins are made orders of magnitude faster with a 
`PiecewiseMergeJoin` as we only need to find
+/// the min/max value of the streamed side to be able to emit all matches on 
the buffered side. By putting
+/// the side we need to mark onto the sorted buffer side, we can emit all 
these matches at once.
+///
+/// For Left Semi, Anti, and Mark joins we swap the inputs so that the marked 
side is on the buffered side.
+///
+/// Here is an example:
+/// We perform a `JoinType::LeftSemi` with these two batches and the operator 
being `Operator::Lt`(<). Because
+/// the operator is `Operator::Lt` we can find the minimum value in the 
streamed side; in this case it is 200.
+/// We can then advance a pointer from the start of the buffer side until we 
find the first value that satisfies
+/// the predicate. All rows after that first matched value satisfy the 
condition 200 < x so we can mark all of
+/// those rows as matched.
+///
+/// ```text
+/// SQL statement:
+/// SELECT *
+/// FROM (VALUES (100), (200), (500)) AS streamed(a)
+/// LEFT SEMI JOIN (VALUES (100), (200), (200), (300), (400)) AS buffered(b)
+///   ON streamed.a < buffered.b;
+/// 
+///           Unsorted Streamed Side         Sorted Buffered Side  
+///            ┌──────────────────┐          ┌──────────────────┐
+///          1 │       500        │        1 │       100        │
+///            ├──────────────────┤          ├──────────────────┤
+///          2 │       200        │        2 │       200        │
+///            ├──────────────────┤          ├──────────────────┤    
+///          3 │       300        │        3 │       200        │
+///            └──────────────────┘          ├──────────────────┤
+///               min value: 200           4 │       300        │ ─┐
+///                                          ├──────────────────┤  | We emit 
matches for row 4 - 5 on the
+///                                        5 │       400        | ─┘ buffered 
side.
+///                                          └──────────────────┘
+/// ```
+///
+/// For both types of joins, the buffered side must be sorted ascending for 
`Operator::Lt`(<) or
+/// `Operator::LtEq`(<=) and descending for `Operator::Gt`(>) or 
`Operator::GtEq`(>=).
+///
+/// # Further Reference Material
+/// DuckDB blog on Range Joins: [Range Joins in 
DuckDB](https://duckdb.org/2022/05/27/iejoin.html)
#[derive(Debug)]
pub struct PiecewiseMergeJoinExec {
    /// Left input execution plan; treated as the streamed side (its batches
    /// are sorted incrementally during execution)
    pub streamed: Arc<dyn ExecutionPlan>,
    /// Right input execution plan; treated as the buffered side (expected to
    /// arrive already sorted on the join key)
    pub buffered: Arc<dyn ExecutionPlan>,
    /// The two expressions being compared (left expr, right expr)
    pub on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
    /// Comparison operator in the range predicate (`<`, `<=`, `>`, `>=`)
    pub operator: Operator,
    /// How the join is performed
    pub join_type: JoinType,
    /// The schema once the join is applied
    schema: SchemaRef,
    /// Shared future that collects the buffered side exactly once and is
    /// reused across partitions
    buffered_fut: OnceAsync<BufferedSideData>,
    /// Execution metrics
    metrics: ExecutionPlanMetricsSet,
    /// The left SortExpr (built from `on.0` and `sort_options`)
    left_sort_exprs: LexOrdering,
    /// The right SortExpr (built from `on.1` and `sort_options`)
    right_sort_exprs: LexOrdering,
    /// Sort options of join columns used in sorting the streamed and buffered
    /// execution plans; direction depends on `operator` (see `try_new`)
    sort_options: SortOptions,
    /// Cache holding plan properties like equivalences, output partitioning etc.
    cache: PlanProperties,
}
+
impl PiecewiseMergeJoinExec {
    /// Creates a new `PiecewiseMergeJoinExec`.
    ///
    /// # Errors
    /// Returns a planning error when `join_type` is a mark join (not yet
    /// supported), when `operator` is not a range operator (`<`, `<=`, `>`,
    /// `>=`), or when valid sort expressions cannot be built for either side.
    pub fn try_new(
        streamed: Arc<dyn ExecutionPlan>,
        buffered: Arc<dyn ExecutionPlan>,
        on: (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>),
        operator: Operator,
        join_type: JoinType,
    ) -> Result<Self> {
        // TODO: Implement mark joins for PiecewiseMergeJoin
        if matches!(join_type, JoinType::LeftMark | JoinType::RightMark) {
            return plan_err!(
                "Mark Joins are currently not supported for PiecewiseMergeJoin"
            );
        }

        // We take the operator and enforce a sort order on the streamed + buffered
        // side based on the operator type: ascending for `<`/`<=`, descending for
        // `>`/`>=`.
        let sort_options = match operator {
            Operator::Lt | Operator::LtEq => {
                // For the Left existence joins the inputs will be swapped at
                // execution time, so we need to switch the sort options.
                if is_left_existence_join(join_type) {
                    SortOptions::new(true, false)
                } else {
                    SortOptions::new(false, false)
                }
            }
            Operator::Gt | Operator::GtEq => {
                if is_left_existence_join(join_type) {
                    SortOptions::new(false, false)
                } else {
                    SortOptions::new(true, false)
                }
            }
            _ => {
                return plan_err!(
                    "Cannot contain non-range operator in PiecewiseMergeJoinExec"
                )
            }
        };

        // Both sides sort on their respective join expression with the same options.
        let left_sort_exprs =
            vec![PhysicalSortExpr::new(Arc::clone(&on.0), sort_options)];
        let right_sort_exprs =
            vec![PhysicalSortExpr::new(Arc::clone(&on.1), sort_options)];
        let Some(left_sort_exprs) = LexOrdering::new(left_sort_exprs) else {
            return plan_err!(
                "PiecewiseMergeJoinExec requires valid sort expressions for its left side"
            );
        };
        let Some(right_sort_exprs) = LexOrdering::new(right_sort_exprs) else {
            return plan_err!(
                "PiecewiseMergeJoinExec requires valid sort expressions for its right side"
            );
        };

        let streamed_schema = streamed.schema();
        let buffered_schema = buffered.schema();

        // Create output schema for the join
        let schema =
            Arc::new(build_join_schema(&streamed_schema, &buffered_schema, &join_type).0);
        let cache = Self::compute_properties(
            &streamed,
            &buffered,
            Arc::clone(&schema),
            join_type,
            &on,
        )?;

        Ok(Self {
            streamed,
            buffered,
            on,
            operator,
            join_type,
            schema,
            buffered_fut: Default::default(),
            metrics: ExecutionPlanMetricsSet::new(),
            left_sort_exprs,
            right_sort_exprs,
            sort_options,
            cache,
        })
    }

    /// Reference to streamed side execution plan
    pub fn streamed(&self) -> &Arc<dyn ExecutionPlan> {
        &self.streamed
    }

    /// Reference to buffered side execution plan
    pub fn buffered(&self) -> &Arc<dyn ExecutionPlan> {
        &self.buffered
    }

    /// Join type
    pub fn join_type(&self) -> JoinType {
        self.join_type
    }

    /// Reference to sort options
    pub fn sort_options(&self) -> &SortOptions {
        &self.sort_options
    }

    /// Get probe side (streamed side) for the PiecewiseMergeJoin.
    /// In the current implementation, the probe side is determined according
    /// to the join type: right-marked joins probe the left input, everything
    /// else probes the right input.
    pub fn probe_side(join_type: &JoinType) -> JoinSide {
        match join_type {
            JoinType::Right
            | JoinType::RightSemi
            | JoinType::RightAnti
            | JoinType::RightMark => JoinSide::Left,
            JoinType::Inner
            | JoinType::Left
            | JoinType::Full
            | JoinType::LeftAnti
            | JoinType::LeftSemi
            | JoinType::LeftMark => JoinSide::Right,
        }
    }

    /// Computes the cached [`PlanProperties`] (equivalence properties, output
    /// partitioning, emission type, and boundedness) for this join.
    pub fn compute_properties(
        streamed: &Arc<dyn ExecutionPlan>,
        buffered: &Arc<dyn ExecutionPlan>,
        schema: SchemaRef,
        join_type: JoinType,
        join_on: &(PhysicalExprRef, PhysicalExprRef),
    ) -> Result<PlanProperties> {
        let eq_properties = join_equivalence_properties(
            streamed.equivalence_properties().clone(),
            buffered.equivalence_properties().clone(),
            &join_type,
            schema,
            &Self::maintains_input_order(join_type),
            Some(Self::probe_side(&join_type)),
            &[join_on.clone()],
        )?;

        let output_partitioning =
            symmetric_join_output_partitioning(streamed, buffered, &join_type)?;

        Ok(PlanProperties::new(
            eq_properties,
            output_partitioning,
            EmissionType::Incremental,
            boundedness_from_children([streamed, buffered]),
        ))
    }

    /// Reports, per child, whether this operator preserves that child's input
    /// order in its output.
    fn maintains_input_order(join_type: JoinType) -> Vec<bool> {
        match join_type {
            // The existence side is expected to come in sorted
            JoinType::LeftSemi | JoinType::LeftAnti | JoinType::LeftMark => {
                vec![true, false]
            }
            JoinType::RightSemi | JoinType::RightAnti | JoinType::RightMark => {
                vec![false, true]
            }
            // Left, Right, Full, Inner Join is not guaranteed to maintain
            // input order as the streamed side will be sorted during
            // execution for `PiecewiseMergeJoin`
            _ => vec![false, false],
        }
    }

    // TODO: We implement this with the physical planner.
    pub fn swap_inputs(&self) -> Result<Arc<dyn ExecutionPlan>> {
        todo!()
    }
}
+
+impl ExecutionPlan for PiecewiseMergeJoinExec {
+    fn name(&self) -> &str {
+        "PiecewiseMergeJoinExec"
+    }
+
+    fn as_any(&self) -> &dyn std::any::Any {
+        self
+    }
+
+    fn properties(&self) -> &PlanProperties {
+        &self.cache
+    }
+
+    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
+        vec![&self.streamed, &self.buffered]
+    }
+
+    fn required_input_ordering(&self) -> Vec<Option<OrderingRequirements>> {
+        // Existence joins don't need to be sorted on one side.
+        if is_left_existence_join(self.join_type) {
+            // Left side needs to be sorted because this will be swapped to the
+            // buffered side
+            vec![
+                Some(OrderingRequirements::from(self.left_sort_exprs.clone())),
+                None,
+            ]
+        } else {
+            // We sort the left side in memory, so we do not need to enforce 
any sorting
+            vec![
+                None,
+                
Some(OrderingRequirements::from(self.right_sort_exprs.clone())),
+            ]
+        }
+    }
+
+    fn with_new_children(
+        self: Arc<Self>,
+        children: Vec<Arc<dyn ExecutionPlan>>,
+    ) -> Result<Arc<dyn ExecutionPlan>> {
+        match &children[..] {
+            [left, right] => Ok(Arc::new(PiecewiseMergeJoinExec::try_new(
+                Arc::clone(left),
+                Arc::clone(right),
+                self.on.clone(),
+                self.operator,
+                self.join_type,
+            )?)),
+            _ => internal_err!(
+                "PiecewiseMergeJoin should have 2 children, found {}",
+                children.len()
+            ),
+        }
+    }
+
+    fn execute(
+        &self,
+        partition: usize,
+        context: Arc<datafusion_execution::TaskContext>,
+    ) -> Result<SendableRecordBatchStream> {
+        let on_streamed = Arc::clone(&self.on.0);
+        let on_buffered = Arc::clone(&self.on.1);
+
+        // If the join type is either LeftSemi, LeftAnti, or LeftMark we will 
swap the inputs
+        // and sort ordering because we want the mark side to be the buffered 
side.
+        let (streamed, buffered, on_streamed, on_buffered, operator) =
+            if is_left_existence_join(self.join_type) {
+                (
+                    Arc::clone(&self.buffered),
+                    Arc::clone(&self.streamed),
+                    on_buffered,
+                    on_streamed,
+                    self.operator.swap().unwrap(),
+                )
+            } else {
+                (
+                    Arc::clone(&self.streamed),
+                    Arc::clone(&self.buffered),
+                    on_streamed,
+                    on_buffered,
+                    self.operator,
+                )
+            };
+
+        let metrics = BuildProbeJoinMetrics::new(0, &self.metrics);
+        let buffered_fut = self.buffered_fut.try_once(|| {
+            let reservation = MemoryConsumer::new("PiecewiseMergeJoinInput")
+                .register(context.memory_pool());
+            let buffered_stream = buffered.execute(partition, 
Arc::clone(&context))?;
+            Ok(build_buffered_data(
+                buffered_stream,
+                Arc::clone(&on_buffered),
+                metrics.clone(),
+                reservation,
+                build_visited_indices_map(self.join_type),
+            ))
+        })?;
+
+        let streamed = streamed.execute(partition, Arc::clone(&context))?;
+        let existence_join = is_existence_join(self.join_type());
+
+        Ok(Box::pin(PiecewiseMergeJoinStream::try_new(
+            Arc::clone(&self.schema),
+            on_streamed,
+            self.join_type,
+            operator,
+            streamed,
+            BufferedSide::Initial(BufferedSideInitialState { buffered_fut }),
+            if existence_join {
+                PiecewiseMergeJoinStreamState::FetchStreamBatch
+            } else {
+                PiecewiseMergeJoinStreamState::WaitBufferedSide
+            },
+            existence_join,
+            self.sort_options,
+            metrics,
+        )))
+    }
+}
+
impl DisplayAs for PiecewiseMergeJoinExec {
    /// Formats this operator for `EXPLAIN` output.
    fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter) -> std::fmt::Result {
        // Render the join predicate as `(<left expr> <op> <right expr>)`.
        let on_str = format!(
            "({} {} {})",
            fmt_sql(self.on.0.as_ref()),
            self.operator,
            fmt_sql(self.on.1.as_ref())
        );

        match t {
            DisplayFormatType::Default | DisplayFormatType::Verbose => {
                write!(
                    f,
                    "PiecewiseMergeJoin: operator={:?}, join_type={:?}, on={}",
                    self.operator, self.join_type, on_str
                )
            }

            DisplayFormatType::TreeRender => {
                // Tree rendering omits `join_type` for inner joins.
                writeln!(f, "operator={:?}", self.operator)?;
                if self.join_type != JoinType::Inner {
                    writeln!(f, "join_type={:?}", self.join_type)?;
                }
                writeln!(f, "on={on_str}")
            }
        }
    }
}
+
+// Returns boolean for whether the join is an existence join
+fn is_existence_join(join_type: JoinType) -> bool {
+    matches!(
+        join_type,
+        JoinType::LeftAnti
+            | JoinType::RightAnti
+            | JoinType::LeftSemi
+            | JoinType::RightSemi
+            | JoinType::LeftMark
+            | JoinType::RightMark
+    )
+}
+
+// Returns boolean for whether the join is a left existence join
+fn is_left_existence_join(join_type: JoinType) -> bool {
+    matches!(
+        join_type,
+        JoinType::LeftAnti | JoinType::LeftSemi | JoinType::LeftMark
+    )
+}
+
+// Returns boolean to check if the join type needs to record
+// buffered side matches for classic joins
+fn need_produce_result_in_final(join_type: JoinType) -> bool {
+    matches!(join_type, JoinType::Full | JoinType::Right)
+}
+
+// Returns boolean for whether or not we need to build the buffered side
+// bitmap for marking matched rows on the buffered side.
+fn build_visited_indices_map(join_type: JoinType) -> bool {
+    matches!(
+        join_type,
+        JoinType::Full
+            | JoinType::Right
+            | JoinType::LeftAnti
+            | JoinType::RightAnti
+            | JoinType::LeftSemi
+            | JoinType::RightSemi
+            | JoinType::LeftMark
+            | JoinType::RightMark
+    )
+}
+
/// Collects the entire buffered side of the join into a [`BufferedSideData`].
///
/// Drains `buffered`, concatenates all batches into a single `RecordBatch`,
/// evaluates the buffered join-key expression (`on_buffered`) over it, and —
/// when `build_map` is true — allocates a bitmap for marking matched buffered
/// rows. All memory (batches, key array, bitmap) is accounted against
/// `reservation`, and build-side `metrics` are updated along the way.
async fn build_buffered_data(
    buffered: SendableRecordBatchStream,
    on_buffered: PhysicalExprRef,
    metrics: BuildProbeJoinMetrics,
    reservation: MemoryReservation,
    build_map: bool,
) -> Result<BufferedSideData> {
    let schema = buffered.schema();

    // Combine batches and record number of rows.
    // Accumulator layout: (batches, row_count, metrics, reservation).
    let initial = (Vec::new(), 0, metrics, reservation);
    let (batches, num_rows, metrics, mut reservation) = buffered
        .try_fold(initial, |mut acc, batch| async {
            let batch_size = get_record_batch_memory_size(&batch);
            // Grow the reservation before retaining the batch so we fail
            // early if the memory budget would be exceeded.
            acc.3.try_grow(batch_size)?;
            acc.2.build_mem_used.add(batch_size);
            acc.2.build_input_batches.add(1);
            acc.2.build_input_rows.add(batch.num_rows());
            // Update row count
            acc.1 += batch.num_rows();
            // Push batch to output
            acc.0.push(batch);
            Ok(acc)
        })
        .await?;

    // NOTE(review): batches are concatenated in *reverse* arrival order —
    // confirm this is intentional (e.g. required by the expected sort order
    // of the buffered input) and document why; it is surprising on its own.
    let batches_iter = batches.iter().rev();
    let single_batch = concat_batches(&schema, batches_iter)?;

    // Evaluate physical expression on the buffered side.
    let buffered_values = on_buffered
        .evaluate(&single_batch)?
        .into_array(single_batch.num_rows())?;

    // The size estimate covers the concatenated batch plus the evaluated
    // join-key array.
    let size_estimation = get_record_batch_memory_size(&single_batch)
        + buffered_values.get_array_memory_size();
    reservation.try_grow(size_estimation)?;
    metrics.build_mem_used.add(size_estimation);

    // Create the visited-indices bitmap only if the join type requires it.
    let visited_indices_bitmap = if build_map {
        let bitmap_size = bit_util::ceil(single_batch.num_rows(), 8);
        reservation.try_grow(bitmap_size)?;
        metrics.build_mem_used.add(bitmap_size);

        // `num_rows` equals `single_batch.num_rows()` here; every bit starts
        // unset (no buffered row has matched yet).
        let mut bitmap_buffer = BooleanBufferBuilder::new(single_batch.num_rows());
        bitmap_buffer.append_n(num_rows, false);
        bitmap_buffer
    } else {
        // Empty placeholder for join types that do not track matches.
        BooleanBufferBuilder::new(0)
    };

    let buffered_data = BufferedSideData::new(
        single_batch,
        buffered_values,
        Mutex::new(visited_indices_bitmap),
        reservation,
    );

    Ok(buffered_data)
}
+
/// Fully collected state of the buffered side of the join.
struct BufferedSideData {
    // All buffered input concatenated into a single batch
    batch: RecordBatch,
    // Join-key values evaluated over `batch`
    values: ArrayRef,
    // Shared bitmap marking buffered rows that have matched
    visited_indices_bitmap: SharedBitmapBuilder,
    // Held only so the memory stays accounted for the lifetime of the data
    _reservation: MemoryReservation,
}
+
+impl BufferedSideData {
+    fn new(
+        batch: RecordBatch,
+        values: ArrayRef,
+        visited_indices_bitmap: SharedBitmapBuilder,
+        reservation: MemoryReservation,
+    ) -> Self {
+        Self {
+            batch,
+            values,
+            visited_indices_bitmap,
+            _reservation: reservation,
+        }
+    }
+
+    fn batch(&self) -> &RecordBatch {
+        &self.batch
+    }
+
+    fn values(&self) -> &ArrayRef {
+        &self.values
+    }
+}
+
/// Two-phase state of the buffered side: collection in flight, then ready.
enum BufferedSide {
    /// Indicates that build-side not collected yet
    Initial(BufferedSideInitialState),
    /// Indicates that build-side data has been collected
    Ready(BufferedSideReadyState),
}
+
+impl BufferedSide {
+    // Takes a mutable state of the buffered row batches
+    fn try_as_initial_mut(&mut self) -> Result<&mut BufferedSideInitialState> {
+        match self {
+            BufferedSide::Initial(state) => Ok(state),
+            _ => internal_err!("Expected build side in initial state"),
+        }
+    }
+
+    fn try_as_ready(&self) -> Result<&BufferedSideReadyState> {
+        match self {
+            BufferedSide::Ready(state) => Ok(state),
+            _ => {
+                internal_err!("Expected build side in ready state")
+            }
+        }
+    }
+
+    /// Tries to extract BuildSideReadyState from BuildSide enum.
+    /// Returns an error if state is not Ready.
+    fn try_as_ready_mut(&mut self) -> Result<&mut BufferedSideReadyState> {
+        match self {
+            BufferedSide::Ready(state) => Ok(state),
+            _ => internal_err!("Expected build side in ready state"),
+        }
+    }
+}
+
/// Buffered side before collection: holds the shared future that resolves
/// to the collected [`BufferedSideData`].
struct BufferedSideInitialState {
    buffered_fut: OnceFut<BufferedSideData>,
}
+
/// Buffered side after collection has completed.
struct BufferedSideReadyState {
    /// Collected build-side data
    buffered_data: Arc<BufferedSideData>,
}
+
/// States of the piecewise-merge-join stream's state machine.
/// Classic joins start in `WaitBufferedSide`; existence joins start in
/// `FetchStreamBatch` (see `execute`).
enum PiecewiseMergeJoinStreamState {
    /// Waiting for the buffered-side future to resolve
    WaitBufferedSide,
    /// Ready to pull the next batch from the streamed side
    FetchStreamBatch,
    /// Processing the held streamed batch against the buffered side
    ProcessStreamBatch(StreamedBatch),
    /// The streamed input is exhausted
    ExhaustedStreamSide,
    /// No more output will be produced
    Completed,
}
+
+impl PiecewiseMergeJoinStreamState {
+    // Grab mutable reference to the current stream batch
+    fn try_as_process_stream_batch_mut(&mut self) -> Result<&mut 
StreamedBatch> {
+        match self {
+            PiecewiseMergeJoinStreamState::ProcessStreamBatch(state) => 
Ok(state),
+            _ => internal_err!("Expected streamed batch in StreamBatch"),
+        }
+    }
+}
+
/// A streamed-side batch paired with its evaluated join-key arrays.
struct StreamedBatch {
    pub batch: RecordBatch,
    // Join-key values evaluated over `batch`
    values: Vec<ArrayRef>,
}
+
+impl StreamedBatch {
+    fn new(batch: RecordBatch, values: Vec<ArrayRef>) -> Self {
+        Self { batch, values }
+    }
+
+    fn values(&self) -> &Vec<ArrayRef> {
+        &self.values
+    }
+}
+
+struct PiecewiseMergeJoinStream {
+    // Output schema of the `PiecewiseMergeJoin`
+    pub schema: Arc<Schema>,
+
+    // Physical expression that is evaluated on the streamed side
+    // We do not need on_buffered as this is already evaluated when
+    // creating the buffered side which happens before initializing
+    // `PiecewiseMergeJoinStream`
+    pub on_streamed: PhysicalExprRef,
+    // Type of join
+    pub join_type: JoinType,
+    // Comparison operator
+    pub operator: Operator,
+    // Streamed batch
+    pub streamed: SendableRecordBatchStream,
+    // Streamed schema
+    streamed_schema: SchemaRef,
+    // Buffered side data
+    buffered_side: BufferedSide,
+    // Stores the min max value for the streamed side, only needed
+    // for existence joins.
+    streamed_global_min_max: Mutex<Option<ScalarValue>>,
+    // Tracks the state of the `PiecewiseMergeJoin`
+    state: PiecewiseMergeJoinStreamState,
+    // Flag for whether or not the join_type is an existence join.
+    existence_join: bool,

Review Comment:
   I suggest calculating it on the fly (or at least pointing to the util 
function in the comment); I think that makes the logic easier to follow, and 
it can't be the bottleneck.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@datafusion.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: github-unsubscr...@datafusion.apache.org
For additional commands, e-mail: github-h...@datafusion.apache.org


Reply via email to