This is an automated email from the ASF dual-hosted git repository.

tjbanghart pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/calcite.git


The following commit(s) were added to refs/heads/main by this push:
     new 32692447e8 [CALCITE-7254] Add rule for sharing trivially equivalent RelNodes within Combine
32692447e8 is described below

commit 32692447e8b3901e0ee9789a089b0f82566a186a
Author: TJ Banghart <[email protected]>
AuthorDate: Mon Dec 1 10:27:47 2025 -0800

    [CALCITE-7254] Add rule for sharing trivially equivalent RelNodes within Combine
---
 .../adapter/enumerable/EnumerableCombine.java      | 167 +++++++
 .../adapter/enumerable/EnumerableCombineRule.java  |  53 +++
 .../adapter/enumerable/EnumerableRules.java        |   6 +
 .../org/apache/calcite/plan/SpoolRelOptTable.java  | 145 ++++++
 .../java/org/apache/calcite/rel/core/Combine.java  |  42 +-
 .../rel/rules/CombineSimpleEquivalenceRule.java    | 212 +++++++++
 .../org/apache/calcite/runtime/SqlFunctions.java   |  37 ++
 .../calcite/test/CombineRelOptRulesTest.java       | 509 +++++++++++++++++++++
 .../org/apache/calcite/test/SqlFunctionsTest.java  |  33 ++
 .../test/enumerable/EnumerableCombineTest.java     | 167 +++++++
 .../apache/calcite/test/CombineRelOptRulesTest.xml | 371 +++++++++++++++
 11 files changed, 1730 insertions(+), 12 deletions(-)

diff --git a/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCombine.java b/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCombine.java
new file mode 100644
index 0000000000..65340259eb
--- /dev/null
+++ b/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCombine.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.enumerable;
+
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.linq4j.Ord;
+import org.apache.calcite.linq4j.tree.BlockBuilder;
+import org.apache.calcite.linq4j.tree.Expression;
+import org.apache.calcite.linq4j.tree.Expressions;
+import org.apache.calcite.linq4j.tree.ParameterExpression;
+import org.apache.calcite.linq4j.tree.Types;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Combine;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.runtime.SqlFunctions;
+import org.apache.calcite.util.BuiltInMethod;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** Implementation of {@link org.apache.calcite.rel.core.Combine} in
+ * {@link org.apache.calcite.adapter.enumerable.EnumerableConvention enumerable calling convention}.
+ *
+ * <p>The output format is a wide table where each column corresponds to a query
+ * (named QUERY_0, QUERY_1, etc.) and each row contains a struct (map) with that
+ * query's column values for that row index. The number of output rows equals the
+ * maximum row count across all input queries. Queries with fewer rows have null
+ * values for the additional rows.
+ *
+ * <p>Example output for two queries:
+ * <pre>
+ * QUERY_0                  | QUERY_1
+ * ------------------------ | ------------------------
+ * {empno=100, name=Bill}   | {deptno=10, name=Sales}
+ * {empno=110, name=Eric}   | {deptno=20, name=HR}
+ * {empno=120, name=Ted}    | null
+ * </pre>
+ */
+public class EnumerableCombine extends Combine implements EnumerableRel {
+  public EnumerableCombine(RelOptCluster cluster, RelTraitSet traitSet,
+      List<RelNode> inputs) {
+    super(cluster, traitSet, inputs);
+  }
+
+  @Override public EnumerableCombine copy(RelTraitSet traitSet, List<RelNode> inputs) {
+    return new EnumerableCombine(getCluster(), traitSet, inputs);
+  }
+
+  @Override public Result implement(EnumerableRelImplementor implementor, Prefer pref) {
+    final BlockBuilder builder = new BlockBuilder();
+    final RelDataType rowType = getRowType();
+
+    // Collect all query results as lists of maps
+    // Each list corresponds to one query, containing maps for each row
+    final List<Expression> queryLists = new ArrayList<>();
+
+    for (Ord<RelNode> ord : Ord.zip(inputs)) {
+      EnumerableRel input = (EnumerableRel) ord.e;
+      final Result result = implementor.visitChild(this, ord.i, input, pref);
+      Expression childExp =
+          builder.append(
+              "child" + ord.i,
+              result.block);
+
+      // Get column names for this input
+      final List<RelDataTypeField> fields = input.getRowType().getFieldList();
+      final int fieldCount = fields.size();
+
+      // Transform each row to a Map with column names as keys
+      ParameterExpression row = Expressions.parameter(Object.class, "row" + ord.i);
+
+      // Build the arguments for SqlFunctions.map(key1, val1, key2, val2, ...)
+      List<Expression> mapArgs = new ArrayList<>();
+      for (int i = 0; i < fieldCount; i++) {
+        String colName = fields.get(i).getName();
+        mapArgs.add(Expressions.constant(colName));
+        if (fieldCount > 1) {
+          // Multi-column: access row[i]
+          mapArgs.add(
+              Expressions.arrayIndex(
+                  Expressions.convert_(row, Object[].class),
+                  Expressions.constant(i)));
+        } else {
+          // Single column: use row directly
+          mapArgs.add(row);
+        }
+      }
+
+      Expression mapCall =
+          Expressions.call(
+              SqlFunctions.class,
+              "map",
+              Expressions.newArrayInit(Object.class, mapArgs));
+
+      Expression selectLambda = Expressions.lambda(mapCall, row);
+      Expression enumerableToConvert =
+          builder.append("converted" + ord.i,
+              Expressions.call(
+                  childExp,
+                  BuiltInMethod.SELECT.method,
+                  selectLambda));
+
+      // Convert Enumerable to List
+      Expression listExp =
+          builder.append(
+              "list" + ord.i,
+              Expressions.call(
+                  enumerableToConvert,
+                  Types.lookupMethod(
+                      Enumerable.class,
+                      "toList")));
+
+      queryLists.add(listExp);
+    }
+
+    // The physical type: each row is Object[] with one element per query
+    final PhysType physType =
+        PhysTypeImpl.of(
+            implementor.getTypeFactory(),
+            rowType,
+            pref.prefer(JavaRowFormat.ARRAY));
+
+    // Create an array of all query result lists
+    Expression queryListsArray =
+        builder.append("queryLists",
+            Expressions.newArrayInit(List.class, queryLists));
+
+    // Call helper method to combine results into rows
+    // combineQueryResults(List[] queryLists) -> List<Object[]>
+    Expression combinedRows =
+        builder.append("combinedRows",
+            Expressions.call(
+                SqlFunctions.class,
+                "combineQueryResults",
+                queryListsArray));
+
+    Expression enumerableExp =
+        Expressions.call(
+            Types.lookupMethod(
+                Linq4j.class,
+                "asEnumerable",
+                List.class),
+            combinedRows);
+
+    builder.add(enumerableExp);
+
+    return implementor.result(physType, builder.toBlock());
+  }
+}
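
The Javadoc above describes the wide-table output in the abstract. The following standalone sketch (illustrative only, not part of this commit; the class name and sample values are invented) shows the runtime path the generated code relies on: each input row becomes a name-keyed map via SqlFunctions.map, and SqlFunctions.combineQueryResults zips the per-query lists into null-padded wide rows.

import org.apache.calcite.runtime.SqlFunctions;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/** Illustrative sketch of the runtime behavior behind EnumerableCombine. */
public class CombineRuntimeSketch {
  public static void main(String[] args) {
    // Query 0: three EMP-like rows, each converted to a column-name-keyed map.
    List<Map> query0 = Arrays.asList(
        SqlFunctions.map("empno", 100, "name", "Bill"),
        SqlFunctions.map("empno", 110, "name", "Eric"),
        SqlFunctions.map("empno", 120, "name", "Ted"));
    // Query 1: two DEPT-like rows.
    List<Map> query1 = Arrays.asList(
        SqlFunctions.map("deptno", 10, "name", "Sales"),
        SqlFunctions.map("deptno", 20, "name", "HR"));

    // Zip the per-query lists into wide rows; the shorter query is null-padded.
    List<Object[]> rows = SqlFunctions.combineQueryResults(new List[] {query0, query1});
    for (Object[] row : rows) {
      System.out.println("QUERY_0=" + row[0] + " | QUERY_1=" + row[1]);
    }
    // Three rows are printed; the last one has QUERY_1=null, matching the Javadoc table.
  }
}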
diff --git a/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCombineRule.java b/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCombineRule.java
new file mode 100644
index 0000000000..b85e67d3e6
--- /dev/null
+++ b/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableCombineRule.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.enumerable;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.calcite.rel.core.Combine;
+import org.apache.calcite.util.Util;
+
+import java.util.List;
+
+/**
+ * Rule to convert a {@link Combine} to an {@link EnumerableCombine}.
+ *
+ * @see EnumerableRules#ENUMERABLE_COMBINE_RULE
+ */
+class EnumerableCombineRule extends ConverterRule {
+  /** Default configuration. */
+  static final Config DEFAULT_CONFIG = Config.INSTANCE
+      .withConversion(Combine.class, Convention.NONE,
+          EnumerableConvention.INSTANCE, "EnumerableCombineRule")
+      .withRuleFactory(EnumerableCombineRule::new);
+
+  /** Called from the Config. */
+  protected EnumerableCombineRule(Config config) {
+    super(config);
+  }
+
+  @Override public RelNode convert(RelNode rel) {
+    final Combine combine = (Combine) rel;
+    final EnumerableConvention out = EnumerableConvention.INSTANCE;
+    final RelTraitSet traitSet = rel.getCluster().traitSet().replace(out);
+    final List<RelNode> newInputs =
+        Util.transform(combine.getInputs(), n -> convert(n, traitSet));
+    return new EnumerableCombine(rel.getCluster(), traitSet, newInputs);
+  }
+}
diff --git a/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableRules.java b/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableRules.java
index a575d18943..f33997450c 100644
--- a/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableRules.java
+++ b/core/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableRules.java
@@ -103,6 +103,11 @@ private EnumerableRules() {
   public static final EnumerableUnionRule ENUMERABLE_UNION_RULE =
       EnumerableUnionRule.DEFAULT_CONFIG.toRule(EnumerableUnionRule.class);
 
+  /** Rule that converts a {@link org.apache.calcite.rel.core.Combine}
+   * to an {@link EnumerableCombine}. */
+  public static final EnumerableCombineRule ENUMERABLE_COMBINE_RULE =
+      EnumerableCombineRule.DEFAULT_CONFIG.toRule(EnumerableCombineRule.class);
+
   /** Rule that converts a {@link LogicalRepeatUnion} into an
    * {@link EnumerableRepeatUnion}. */
   public static final EnumerableRepeatUnionRule ENUMERABLE_REPEAT_UNION_RULE =
@@ -224,6 +229,7 @@ private EnumerableRules() {
           EnumerableRules.ENUMERABLE_UNCOLLECT_RULE,
           EnumerableRules.ENUMERABLE_MERGE_UNION_RULE,
           EnumerableRules.ENUMERABLE_UNION_RULE,
+          EnumerableRules.ENUMERABLE_COMBINE_RULE,
           EnumerableRules.ENUMERABLE_REPEAT_UNION_RULE,
           EnumerableRules.ENUMERABLE_TABLE_SPOOL_RULE,
           EnumerableRules.ENUMERABLE_INTERSECT_RULE,
diff --git a/core/src/main/java/org/apache/calcite/plan/SpoolRelOptTable.java b/core/src/main/java/org/apache/calcite/plan/SpoolRelOptTable.java
new file mode 100644
index 0000000000..0909b951c4
--- /dev/null
+++ b/core/src/main/java/org/apache/calcite/plan/SpoolRelOptTable.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.plan;
+
+import org.apache.calcite.linq4j.tree.Expression;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelDistribution;
+import org.apache.calcite.rel.RelDistributions;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelReferentialConstraint;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.schema.ColumnStrategy;
+import org.apache.calcite.schema.Statistic;
+import org.apache.calcite.schema.Statistics;
+import org.apache.calcite.schema.Table;
+import org.apache.calcite.schema.impl.ListTransientTable;
+import org.apache.calcite.util.ImmutableBitSet;
+
+import com.google.common.collect.ImmutableList;
+
+import org.checkerframework.checker.nullness.qual.Nullable;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Implementation of {@link RelOptTable} for temporary spool tables.
+ *
+ * <p>This table represents temporary storage used by spool operators
+ * during query execution. It's used for planning purposes only and
+ * will be converted to appropriate physical operators later.
+ */
+public class SpoolRelOptTable implements RelOptTable {
+  private final @Nullable RelOptSchema schema;
+  private final RelDataType rowType;
+  private final String name;
+  private final double rowCount;
+  private final Table table;
+
+  /**
+   * Creates a SpoolRelOptTable with explicit row count.
+   *
+   * @param schema the schema this table belongs to (can be null for temporary tables)
+   * @param rowType the row type of the data that will be stored in this spool
+   * @param name optional name for the spool table
+   * @param rowCount the estimated number of rows that will be materialized in this spool
+   */
+  public SpoolRelOptTable(
+      @Nullable RelOptSchema schema,
+      RelDataType rowType,
+      String name,
+      double rowCount) {
+    this.schema = schema;
+    this.rowType = rowType;
+    this.name = name;
+    this.rowCount = rowCount;
+    // Use standard ListTransientTable with custom statistics for accurate cost estimation
+    this.table = new ListTransientTable(name, rowType) {
+      @Override public Statistic getStatistic() {
+        return Statistics.of(rowCount, ImmutableList.of());
+      }
+    };
+  }
+
+  @Override public RelNode toRel(ToRelContext context) {
+    // This shouldn't be called during planning - spools are created differently
+    throw new UnsupportedOperationException("SpoolRelOptTable.toRel should not be called");
+  }
+
+  @Override public List<String> getQualifiedName() {
+    return ImmutableList.of("TEMP", name);
+  }
+
+  @Override public double getRowCount() {
+    // Return the actual row count of the materialized data in this spool
+    return rowCount;
+  }
+
+  @Override public RelDataType getRowType() {
+    return rowType;
+  }
+
+  @Override public @Nullable RelOptSchema getRelOptSchema() {
+    return schema;
+  }
+
+  @Override public @Nullable RelDistribution getDistribution() {
+    return RelDistributions.ANY;
+  }
+
+  @Override public @Nullable List<ImmutableBitSet> getKeys() {
+    // Spools typically don't have keys
+    return ImmutableList.of();
+  }
+
+  @Override public @Nullable List<RelReferentialConstraint> getReferentialConstraints() {
+    // Temporary tables don't have referential constraints
+    return ImmutableList.of();
+  }
+
+  @Override public @Nullable List<RelCollation> getCollationList() {
+    // Could be extended to preserve collations from the input
+    return ImmutableList.of();
+  }
+
+  @Override public boolean isKey(ImmutableBitSet columns) {
+    return false;
+  }
+
+  @Override public @Nullable Expression getExpression(Class clazz) {
+    // Return null so EnumerableTableScanRule won't try to convert spool table scans
+    // Spool table scans are handled within the spool operator itself
+    return null;
+  }
+
+  @Override public RelOptTable extend(List<RelDataTypeField> extendedFields) {
+    throw new UnsupportedOperationException("SpoolRelOptTable.extend should not be called");
+  }
+
+  @Override public List<ColumnStrategy> getColumnStrategies() {
+    return Collections.emptyList();
+  }
+
+  @Override public <C> @Nullable C unwrap(Class<C> aClass) {
+    if (aClass.isInstance(table)) {
+      return aClass.cast(table);
+    }
+    return null;
+  }
+}
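
As a usage illustration of the constructor and unwrap contract above (a sketch only; it assumes Calcite's JavaTypeFactoryImpl and invents the field names and row count), a spool table can be built directly from a row type and an estimated cardinality, and the backing transient table is reachable via unwrap:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.SpoolRelOptTable;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.Table;
import org.apache.calcite.sql.type.SqlTypeName;

/** Sketch: creating a planning-only spool table with an explicit row count. */
public class SpoolTableSketch {
  public static void main(String[] args) {
    final RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    final RelDataType rowType = typeFactory.builder()
        .add("ENAME", SqlTypeName.VARCHAR, 20)
        .add("SAL", SqlTypeName.INTEGER)
        .build();

    // No schema is needed for a temporary spool; the row count drives cost estimates.
    SpoolRelOptTable spoolTable = new SpoolRelOptTable(null, rowType, "spool_0", 42d);

    // The backing ListTransientTable is exposed through unwrap, and its statistics
    // report the row count supplied above.
    Table table = spoolTable.unwrap(Table.class);
    System.out.println(spoolTable.getQualifiedName()
        + " rows=" + table.getStatistic().getRowCount());
  }
}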
diff --git a/core/src/main/java/org/apache/calcite/rel/core/Combine.java b/core/src/main/java/org/apache/calcite/rel/core/Combine.java
index 286facf2c8..6c99a3a4de 100644
--- a/core/src/main/java/org/apache/calcite/rel/core/Combine.java
+++ b/core/src/main/java/org/apache/calcite/rel/core/Combine.java
@@ -35,9 +35,11 @@
 /**
 * A relational operator that combines multiple relational expressions into a single root.
  * This is used for multi-root optimization in the VolcanoPlanner.
+ *
+ * @see org.apache.calcite.adapter.enumerable.EnumerableCombine
  */
 public class Combine extends AbstractRelNode {
-  protected final ImmutableList<RelNode> inputs;
+  protected ImmutableList<RelNode> inputs;
 
   /** Creates a Combine. */
   public static Combine create(RelOptCluster cluster, RelTraitSet traitSet, List<RelNode> inputs) {
@@ -54,6 +56,25 @@ public Combine(RelOptCluster cluster, RelTraitSet traitSet, List<RelNode> inputs
     return inputs;
   }
 
+  @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
+    return new Combine(getCluster(), traitSet, inputs);
+  }
+
+  @Override public void replaceInput(int ordinalInParent, RelNode rel) {
+    // Combine has multiple inputs stored in an immutable list.
+    // To replace an input, we need to create a new list with the replacement.
+    ImmutableList.Builder<RelNode> newInputs = ImmutableList.builder();
+    for (int i = 0; i < inputs.size(); i++) {
+      if (i == ordinalInParent) {
+        newInputs.add(rel);
+      } else {
+        newInputs.add(inputs.get(i));
+      }
+    }
+    inputs = newInputs.build();
+  }
+
+
   @Override public RelWriter explainTerms(RelWriter pw) {
     super.explainTerms(pw);
     for (Ord<RelNode> ord : Ord.zip(inputs)) {
@@ -63,21 +84,18 @@ public Combine(RelOptCluster cluster, RelTraitSet traitSet, List<RelNode> inputs
   }
 
   @Override protected RelDataType deriveRowType() {
-    // Combine represents multiple independent result sets that are not merged.
-    // Each input maintains its own row type and is accessed independently.
-    //
-    // We use a struct type where each field represents one of the input queries.
-    // This allows metadata and optimization rules to understand the structure
-    // while making it clear that results are not unified into a single stream.
-
     RelDataTypeFactory typeFactory = getCluster().getTypeFactory();
     RelDataTypeFactory.Builder builder = typeFactory.builder();
 
+    // One column per input query (QUERY_0, QUERY_1, etc.)
+    // Each cell is a nullable MAP representing a struct with column names as keys
+    RelDataType anyType = typeFactory.createJavaType(Object.class);
+    RelDataType mapType =
+        typeFactory.createMapType(typeFactory.createJavaType(String.class), anyType);
+    RelDataType nullableMapType = typeFactory.createTypeWithNullability(mapType, true);
+
     for (int i = 0; i < inputs.size(); i++) {
-      RelNode input = inputs.get(i);
-      // Create a field for each input with its row type
-      // Field names are "QUERY_0", "QUERY_1", etc.
-      builder.add("QUERY_" + i, input.getRowType());
+      builder.add("QUERY_" + i, nullableMapType);
     }
 
     return builder.build();
diff --git a/core/src/main/java/org/apache/calcite/rel/rules/CombineSimpleEquivalenceRule.java b/core/src/main/java/org/apache/calcite/rel/rules/CombineSimpleEquivalenceRule.java
new file mode 100644
index 0000000000..eba15bcddc
--- /dev/null
+++ b/core/src/main/java/org/apache/calcite/rel/rules/CombineSimpleEquivalenceRule.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.rel.rules;
+
+import org.apache.calcite.plan.RelDigest;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.RelRule;
+import org.apache.calcite.plan.SpoolRelOptTable;
+import org.apache.calcite.rel.RelCommonExpressionBasicSuggester;
+import org.apache.calcite.rel.RelHomogeneousShuttle;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Combine;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.Spool;
+import org.apache.calcite.rel.logical.LogicalTableScan;
+import org.apache.calcite.rel.logical.LogicalTableSpool;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+
+import com.google.common.collect.ImmutableList;
+
+import org.immutables.value.Value;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Rule that optimizes a {@link Combine} operator by detecting shared sub-expressions
+ * across its inputs and introducing {@link Spool}s to avoid redundant computation.
+ *
+ * <p>This rule identifies structurally equivalent sub-plans within a Combine's inputs
+ * and replaces them with a spool pattern: the first occurrence becomes a producer
+ * (TableSpool that materializes the result), and subsequent occurrences become
+ * consumers (TableScan reading from the spooled data).
+ *
+ * <h2>Example</h2>
+ *
+ * <p>Consider two queries combined that share a common filtered table scan:
+ *
+ * <pre>{@code
+ * -- Query 1: Count high earners
+ * SELECT COUNT(*) FROM EMP WHERE SAL > 2000
+ * -- Query 2: Average salary of high earners
+ * SELECT AVG(SAL) FROM EMP WHERE SAL > 2000
+ * }</pre>
+ *
+ * <p>Before this rule applies, the plan looks like:
+ *
+ * <pre>{@code
+ * Combine
+ *   LogicalAggregate(group=[{}], CNT=[COUNT()])
+ *     LogicalFilter(condition=[>(SAL, 2000)])
+ *       LogicalTableScan(table=[EMP])
+ *   LogicalAggregate(group=[{}], AVG_SAL=[AVG(SAL)])
+ *     LogicalFilter(condition=[>(SAL, 2000)])
+ *       LogicalTableScan(table=[EMP])
+ * }</pre>
+ *
+ * <p>After this rule identifies the shared {@code Filter(SAL > 2000) -> TableScan(EMP)}
+ * sub-expression, the plan becomes:
+ *
+ * <pre>{@code
+ * Combine
+ *   LogicalAggregate(group=[{}], CNT=[COUNT()])
+ *   LogicalTableSpool(table=[spool_0])        -- Producer: materializes filtered rows
+ *       LogicalFilter(condition=[>(SAL, 2000)])
+ *         LogicalTableScan(table=[EMP])
+ *   LogicalAggregate(group=[{}], AVG_SAL=[AVG(SAL)])
+ *     LogicalTableScan(table=[spool_0])         -- Consumer: reads from spool
+ * }</pre>
+ *
+ * @see Combine
+ * @see Spool
+ * @see RelCommonExpressionBasicSuggester
+ */
+@Value.Enclosing
+public class CombineSimpleEquivalenceRule extends RelRule<CombineSimpleEquivalenceRule.Config> {
+
+  /** Creates a CombineSimpleEquivalenceRule. */
+  protected CombineSimpleEquivalenceRule(Config config) {
+    super(config);
+  }
+
+  @Override public void onMatch(RelOptRuleCall call) {
+    RelNode combine = RelOptUtil.stripAll(call.rel(0));
+
+    // Use the suggester to find shared components
+    RelCommonExpressionBasicSuggester suggester = new RelCommonExpressionBasicSuggester();
+    Collection<RelNode> sharedComponents = suggester.suggest(combine, null);
+
+    // Filter out any components that are already spools or scans from spool tables
+    // to avoid creating spools of spools
+    sharedComponents = sharedComponents.stream()
+        .filter(node -> {
+          if (node instanceof Spool) {
+            return false;
+          }
+          // Skip if it's a TableScan reading from a spool table
+          if (node instanceof LogicalTableScan) {
+            LogicalTableScan scan = (LogicalTableScan) node;
+            // Check if the underlying table is a SpoolRelOptTable
+            return !(scan.getTable() instanceof SpoolRelOptTable);
+          }
+          return true;
+        })
+        .collect(java.util.stream.Collectors.toList());
+
+    // If no shared components found, nothing to do
+    if (sharedComponents.isEmpty()) {
+      return;
+    }
+
+    // Map to track which shared component digest gets which spool
+    Map<RelDigest, LogicalTableSpool> digestToSpool = new HashMap<>();
+    int spoolCounter = 0;
+
+    // Get metadata query for row count estimation
+    final RelMetadataQuery mq = call.getMetadataQuery();
+
+    // For each shared component, create a spool
+    for (RelNode sharedComponent : sharedComponents) {
+      // Get the actual row count of the shared component being materialized
+      double actualRowCount = mq.getRowCount(sharedComponent);
+
+      SpoolRelOptTable spoolTable =
+          new SpoolRelOptTable(null,  // no schema needed for temporary tables
+          sharedComponent.getRowType(),
+          "spool_" + spoolCounter++,
+          actualRowCount); // Pass the actual row count for accurate cardinality
+
+      // Create the TableSpool that will produce/write to this table
+      LogicalTableSpool spool =
+          (LogicalTableSpool) RelFactories.DEFAULT_SPOOL_FACTORY.createTableSpool(
+              sharedComponent,
+              Spool.Type.LAZY,  // Read type
+              Spool.Type.LAZY,  // Write type
+              spoolTable);
+
+      digestToSpool.put(sharedComponent.getRelDigest(), spool);
+    }
+
+    combine =
+        combine.accept(getReplacer(digestToSpool));
+
+    call.transformTo(combine);
+  }
+
+  private static RelHomogeneousShuttle getReplacer(
+      Map<RelDigest, LogicalTableSpool> digestToSpool) {
+    Set<RelDigest> producers = new HashSet<>();
+
+    return new RelHomogeneousShuttle() {
+      @Override public RelNode visit(RelNode node) {
+        // Check if this node's digest matches any of our shared components
+        RelDigest nodeDigest = node.getRelDigest();
+        if (digestToSpool.containsKey(nodeDigest)) {
+          LogicalTableSpool spool = digestToSpool.get(nodeDigest);
+
+          if (producers.contains(nodeDigest)) {
+            // Subsequent occurrence - replace with table scan (consumer)
+            return LogicalTableScan.create(
+                node.getCluster(),
+                spool.getTable(),
+                ImmutableList.of());
+          } else {
+            // First occurrence - replace with the spool (producer)
+            producers.add(nodeDigest);
+            return spool;
+          }
+        }
+
+        return super.visit(node);
+      }
+    };
+  }
+
+
+  /** Rule configuration. */
+  @Value.Immutable(singleton = true)
+  public interface Config extends RelRule.Config {
+    Config DEFAULT = ImmutableCombineSimpleEquivalenceRule.Config.builder()
+        .build()
+        .withOperandFor(Combine.class);
+
+    @Override default CombineSimpleEquivalenceRule toRule() {
+      return new CombineSimpleEquivalenceRule(this);
+    }
+
+    default Config withOperandFor(Class<? extends Combine> combineClass) {
+      return withOperandSupplier(b -> b.operand(combineClass)
+          .anyInputs())
+          .as(Config.class);
+    }
+  }
+}
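
For orientation, the rule is enabled the same way the new CombineRelOptRulesTest does further down: add its default configuration (plus the enumerable converter) to a planner. A minimal sketch, assuming a plain VolcanoPlanner; the class and method names are invented for illustration:

import org.apache.calcite.adapter.enumerable.EnumerableRules;
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.plan.volcano.VolcanoPlanner;
import org.apache.calcite.rel.RelCollationTraitDef;
import org.apache.calcite.rel.rules.CombineSimpleEquivalenceRule;

/** Sketch: registering the new rule alongside the enumerable Combine converter. */
public class CombineRuleRegistrationSketch {
  public static VolcanoPlanner newPlanner() {
    VolcanoPlanner planner = new VolcanoPlanner();
    planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
    RelOptUtil.registerDefaultRules(planner, false, false);
    // Share trivially equivalent sub-plans under a Combine by spooling them.
    planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
    // Let the planner implement Combine in the enumerable convention.
    planner.addRule(EnumerableRules.ENUMERABLE_COMBINE_RULE);
    return planner;
  }
}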
diff --git a/core/src/main/java/org/apache/calcite/runtime/SqlFunctions.java b/core/src/main/java/org/apache/calcite/runtime/SqlFunctions.java
index 45c2f7be97..ede46373b8 100644
--- a/core/src/main/java/org/apache/calcite/runtime/SqlFunctions.java
+++ b/core/src/main/java/org/apache/calcite/runtime/SqlFunctions.java
@@ -6884,6 +6884,43 @@ public static Map map(Object... args) {
     return map;
   }
 
+  /** Combines multiple query result lists into rows for the Combine operator.
+   *
+   * <p>Each input list contains maps representing rows from a query.
+   * The output is a list of Object arrays, where each array is a row
+   * with one element per query. The number of output rows equals the
+   * maximum size across all input lists. Shorter lists are padded with nulls.
+   *
+   * @param queryLists array of lists, one per query
+   * @return list of Object arrays representing combined rows
+   */
+  public static List<@Nullable Object[]> combineQueryResults(List[] queryLists) {
+    // Find the maximum row count across all queries
+    int maxRows = 0;
+    for (List list : queryLists) {
+      if (list.size() > maxRows) {
+        maxRows = list.size();
+      }
+    }
+
+    // Build the result rows
+    List<@Nullable Object[]> result = new ArrayList<>(maxRows);
+    for (int rowIdx = 0; rowIdx < maxRows; rowIdx++) {
+      @Nullable Object[] row = new Object[queryLists.length];
+      for (int queryIdx = 0; queryIdx < queryLists.length; queryIdx++) {
+        List queryList = queryLists[queryIdx];
+        if (rowIdx < queryList.size()) {
+          row[queryIdx] = queryList.get(rowIdx);
+        } else {
+          row[queryIdx] = null;
+        }
+      }
+      result.add(row);
+    }
+
+    return result;
+  }
+
   /** Support the STR_TO_MAP function. */
   public static Map strToMap(String string, String stringDelimiter, String keyValueDelimiter) {
     final Map map = new LinkedHashMap();
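
A short usage sketch of the helper added above (the new SqlFunctionsTest case below exercises the same null-padding contract; the sample lists and class name here are invented):

import org.apache.calcite.runtime.SqlFunctions;

import java.util.Arrays;
import java.util.List;

/** Sketch: the null-padding contract of combineQueryResults. */
public class CombineQueryResultsSketch {
  public static void main(String[] args) {
    List<String> shortQuery = Arrays.asList("a", "b");   // 2 rows
    List<Integer> longQuery = Arrays.asList(1, 2, 3);    // 3 rows

    // Output has max(2, 3) = 3 rows; the shorter query is padded with nulls.
    for (Object[] row : SqlFunctions.combineQueryResults(new List[] {shortQuery, longQuery})) {
      System.out.println(Arrays.toString(row));  // [a, 1], [b, 2], [null, 3]
    }
  }
}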
diff --git a/core/src/test/java/org/apache/calcite/test/CombineRelOptRulesTest.java b/core/src/test/java/org/apache/calcite/test/CombineRelOptRulesTest.java
new file mode 100644
index 0000000000..6ca1cbb449
--- /dev/null
+++ b/core/src/test/java/org/apache/calcite/test/CombineRelOptRulesTest.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.test;
+
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Combine;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.rules.CombineSimpleEquivalenceRule;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.tools.RelBuilder;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.function.Function;
+
+/**
+ * Unit tests for {@link Combine} RelNode demonstrating various
+ * shared component patterns including joins, filters, aggregations, and projections.
+ */
+class CombineRelOptRulesTest extends RelOptTestBase {
+
+  @Override RelOptFixture fixture() {
+    return super.fixture()
+        .withDiffRepos(DiffRepository.lookup(CombineRelOptRulesTest.class));
+  }
+
+  @Test void testSharedJoin() {
+    // Two queries sharing the same EMP-DEPT join
+    // Query 1: SELECT E.EMPNO, D.DNAME FROM EMP E JOIN DEPT D ON E.DEPTNO = D.DEPTNO
+    // Query 2: SELECT E.ENAME, D.LOC FROM EMP E JOIN DEPT D ON E.DEPTNO = D.DEPTNO
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .project(b.field("EMPNO"), b.field("DNAME"));
+
+      // Query 2
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .project(b.field("ENAME"), b.field("LOC"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testSharedComplexJoin() {
+    // Multiple queries sharing a 3-way join: EMP -> DEPT -> SALGRADE
+    // Query 1: Count employees per department grade
+    // Query 2: Average salary per department grade
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: SELECT D.DNAME, S.GRADE, COUNT(*) ...
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .scan("SALGRADE")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+                      b.field(2, 0, "SAL"),
+                      b.field(2, 1, "LOSAL")),
+                  b.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                      b.field(2, 0, "SAL"),
+                      b.field(2, 1, "HISAL"))))
+          .aggregate(
+              b.groupKey("DNAME", "GRADE"),
+              b.count(false, "EMP_COUNT"));
+
+      // Query 2: SELECT D.DNAME, S.GRADE, AVG(SAL) ...
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .scan("SALGRADE")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+                      b.field(2, 0, "SAL"),
+                      b.field(2, 1, "LOSAL")),
+                  b.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                      b.field(2, 0, "SAL"),
+                      b.field(2, 1, "HISAL"))))
+          .aggregate(
+              b.groupKey("DNAME", "GRADE"),
+              b.avg(false, "AVG_SAL", b.field("SAL")));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  // ========== Shared Filter Tests ==========
+
+  @Test void testSharedFilter() {
+    // Two queries sharing the same filter condition
+    // Query 1: SELECT EMPNO, SAL FROM EMP WHERE SAL > 2000 AND DEPTNO = 10
+    // Query 2: SELECT ENAME, JOB FROM EMP WHERE SAL > 2000 AND DEPTNO = 10
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN,
+                      b.field("SAL"),
+                      b.literal(2000)),
+                  b.call(SqlStdOperatorTable.EQUALS,
+                      b.field("DEPTNO"),
+                      b.literal(10))))
+          .project(b.field("EMPNO"), b.field("SAL"));
+
+      // Query 2
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN,
+                      b.field("SAL"),
+                      b.literal(2000)),
+                  b.call(SqlStdOperatorTable.EQUALS,
+                      b.field("DEPTNO"),
+                      b.literal(10))))
+          .project(b.field("ENAME"), b.field("JOB"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testSharedFilterWithDifferentProjections() {
+    // Three queries sharing same filter but different projections
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Shared filter: EMP WHERE SAL BETWEEN 1000 AND 3000
+      // Query 1: Count
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+                      b.field("SAL"),
+                      b.literal(1000)),
+                  b.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                      b.field("SAL"),
+                      b.literal(3000))))
+          .aggregate(b.groupKey(), b.count(false, "CNT"));
+
+      // Query 2: Average salary
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+                      b.field("SAL"),
+                      b.literal(1000)),
+                  b.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                      b.field("SAL"),
+                      b.literal(3000))))
+          .aggregate(b.groupKey(), b.avg(false, "AVG_SAL", b.field("SAL")));
+
+      // Query 3: List of names
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+                      b.field("SAL"),
+                      b.literal(1000)),
+                  b.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                      b.field("SAL"),
+                      b.literal(3000))))
+          .project(b.field("ENAME"), b.field("SAL"));
+
+      return b.combine(3).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testSharedAggregationBase() {
+    // Two queries that could share aggregation computation
+    // Query 1: SELECT DEPTNO, SUM(SAL), COUNT(*) FROM EMP GROUP BY DEPTNO
+    // Query 2: SELECT DEPTNO, SUM(SAL) FROM EMP GROUP BY DEPTNO HAVING SUM(SAL) > 10000
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: Basic aggregation
+      b.scan("EMP")
+          .aggregate(
+              b.groupKey("DEPTNO"),
+              b.sum(false, "TOTAL_SAL", b.field("SAL")));
+
+      // Query 2: Same aggregation with HAVING clause
+      b.scan("EMP")
+          .aggregate(
+              b.groupKey("DEPTNO"),
+              b.sum(false, "TOTAL_SAL", b.field("SAL")))
+          .filter(
+              b.call(SqlStdOperatorTable.GREATER_THAN,
+                  b.field("TOTAL_SAL"),
+                  b.literal(10000)));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testSharedJoinThenAggregation() {
+    // Queries sharing join followed by different aggregations
+    // Query 1: Total salary by department
+    // Query 2: Employee count by location
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: SELECT D.DNAME, SUM(E.SAL) FROM EMP E JOIN DEPT D ... GROUP BY D.DNAME
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .aggregate(
+              b.groupKey("DNAME"),
+              b.sum(false, "TOTAL_SAL", b.field("SAL")));
+
+      // Query 2: SELECT D.LOC, COUNT(*) FROM EMP E JOIN DEPT D ... GROUP BY D.LOC
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .aggregate(
+              b.groupKey("LOC"),
+              b.count(false, "EMP_CNT"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testSharedFilterJoinAggregate() {
+    // Complex pattern: Filter -> Join -> different aggregations
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: High earners by department with count
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.GREATER_THAN,
+                  b.field("SAL"),
+                  b.literal(2000)))
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .aggregate(
+              b.groupKey("DNAME"),
+              b.count(false, "HIGH_EARNER_CNT"));
+
+      // Query 2: High earners by department with average
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.GREATER_THAN,
+                  b.field("SAL"),
+                  b.literal(2000)))
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .aggregate(
+              b.groupKey("DNAME"),
+              b.avg(false, "AVG_HIGH_SAL", b.field("SAL")));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  // ========== Tests WITHOUT Shared Expressions (No Trivial Equivalence) ==========
+
+  @Test void testNoSharedExpressionsDifferentTables() {
+    // Two queries on completely different tables - no sharing possible
+    // Query 1: SELECT * FROM EMP
+    // Query 2: SELECT * FROM SALGRADE
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: EMP table
+      b.scan("EMP")
+          .project(b.field("EMPNO"), b.field("ENAME"), b.field("SAL"));
+
+      // Query 2: SALGRADE table (completely unrelated)
+      b.scan("SALGRADE")
+          .project(b.field("GRADE"), b.field("LOSAL"), b.field("HISAL"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testNoSharedExpressionsDifferentFilters() {
+    // Two queries on same table but with non-overlapping filters - no sharing
+    // Query 1: SELECT EMPNO FROM EMP WHERE DEPTNO = 10
+    // Query 2: SELECT ENAME FROM EMP WHERE JOB = 'CLERK'
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: Filter on DEPTNO
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.EQUALS,
+              b.field("DEPTNO"),
+              b.literal(10)))
+          .project(b.field("EMPNO"));
+
+      // Query 2: Filter on JOB (different filter, not shareable)
+      b.scan("EMP")
+          .filter(
+              b.call(SqlStdOperatorTable.EQUALS,
+              b.field("JOB"),
+              b.literal("CLERK")))
+          .project(b.field("ENAME"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testNoSharedExpressionsDifferentJoins() {
+    // Two queries with different join conditions - no sharing
+    // Query 1: EMP JOIN DEPT ON DEPTNO
+    // Query 2: EMP JOIN SALGRADE ON SAL between LOSAL and HISAL
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: EMP-DEPT join
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .project(b.field("ENAME"), b.field("DNAME"));
+
+      // Query 2: EMP-SALGRADE join (completely different join)
+      b.scan("EMP")
+          .scan("SALGRADE")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.AND,
+                  b.call(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
+                      b.field(2, 0, "SAL"),
+                      b.field(2, 1, "LOSAL")),
+                  b.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+                      b.field(2, 0, "SAL"),
+                      b.field(2, 1, "HISAL"))))
+          .project(b.field("ENAME"), b.field("GRADE"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testNoSharedExpressionsDifferentGroupByKeys() {
+    // Two queries with different GROUP BY keys - no sharing
+    // Query 1: GROUP BY DEPTNO
+    // Query 2: GROUP BY JOB
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: Group by DEPTNO
+      b.scan("EMP")
+          .aggregate(
+              b.groupKey("DEPTNO"),
+              b.count(false, "CNT"));
+
+      // Query 2: Group by JOB (different key, not shareable)
+      b.scan("EMP")
+          .aggregate(
+              b.groupKey("JOB"),
+              b.count(false, "CNT"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+
+  @Test void testNoSharedExpressionsDifferentJoinTypes() {
+    // Two queries with same tables but different join types - no sharing
+    // Query 1: EMP INNER JOIN DEPT
+    // Query 2: EMP FULL OUTER JOIN DEPT
+    final Function<RelBuilder, RelNode> relFn = b -> {
+      // Query 1: Inner join
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.INNER,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .project(b.field("EMPNO"), b.field("DNAME"));
+
+      // Query 2: Full outer join (different join type)
+      b.scan("EMP")
+          .scan("DEPT")
+          .join(JoinRelType.FULL,
+              b.call(SqlStdOperatorTable.EQUALS,
+                  b.field(2, 0, "DEPTNO"),
+                  b.field(2, 1, "DEPTNO")))
+          .project(b.field("EMPNO"), b.field("DNAME"));
+
+      return b.combine(2).build();
+    };
+
+    relFn(relFn)
+        .withVolcanoPlanner(false, planner -> {
+          planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+          RelOptUtil.registerDefaultRules(planner, false, false);
+          planner.addRule(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule());
+        })
+        .check();
+  }
+}
diff --git a/core/src/test/java/org/apache/calcite/test/SqlFunctionsTest.java b/core/src/test/java/org/apache/calcite/test/SqlFunctionsTest.java
index 043c23b105..4c5f6cc2b7 100644
--- a/core/src/test/java/org/apache/calcite/test/SqlFunctionsTest.java
+++ b/core/src/test/java/org/apache/calcite/test/SqlFunctionsTest.java
@@ -85,6 +85,7 @@
 import static org.hamcrest.CoreMatchers.nullValue;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.hasToString;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -2057,4 +2058,36 @@ private long sqlTimestamp(String str) {
     assertArrayEquals(new byte[]{(byte) 0x80, (byte) 0x00},
         SqlFunctions.leftShift(new byte[]{(byte) 0x40, (byte) 0x00}, 1));
   }
+
+  @Test void testCombineQueryResults() {
+    // Test combining two equal-length lists
+    List<Integer> list1 = Arrays.asList(1, 2, 3);
+    List<Integer> list2 = Arrays.asList(10, 20, 30);
+    List<Object[]> result = SqlFunctions.combineQueryResults(new List[]{list1, list2});
+
+    assertThat(result, hasSize(3));
+    assertArrayEquals(new Object[]{1, 10}, result.get(0));
+    assertArrayEquals(new Object[]{2, 20}, result.get(1));
+    assertArrayEquals(new Object[]{3, 30}, result.get(2));
+
+    // Test combining lists of different lengths (shorter list padded with nulls)
+    List<String> listA = Arrays.asList("a", "b");
+    List<String> listB = Arrays.asList("x", "y", "z", "w");
+    result = SqlFunctions.combineQueryResults(new List[]{listA, listB});
+
+    assertThat(result, hasSize(4));
+    assertArrayEquals(new Object[]{"a", "x"}, result.get(0));
+    assertArrayEquals(new Object[]{"b", "y"}, result.get(1));
+    assertArrayEquals(new Object[]{null, "z"}, result.get(2));
+    assertArrayEquals(new Object[]{null, "w"}, result.get(3));
+
+    // Test with empty list
+    List<Integer> emptyList = Collections.emptyList();
+    List<Integer> nonEmpty = Arrays.asList(100, 200);
+    result = SqlFunctions.combineQueryResults(new List[]{emptyList, nonEmpty});
+
+    assertThat(result, hasSize(2));
+    assertArrayEquals(new Object[]{null, 100}, result.get(0));
+    assertArrayEquals(new Object[]{null, 200}, result.get(1));
+  }
 }
diff --git a/core/src/test/java/org/apache/calcite/test/enumerable/EnumerableCombineTest.java b/core/src/test/java/org/apache/calcite/test/enumerable/EnumerableCombineTest.java
new file mode 100644
index 0000000000..70c3ed4ec0
--- /dev/null
+++ b/core/src/test/java/org/apache/calcite/test/enumerable/EnumerableCombineTest.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.test.enumerable;
+
+import org.apache.calcite.adapter.java.ReflectiveSchema;
+import org.apache.calcite.config.CalciteConnectionProperty;
+import org.apache.calcite.config.Lex;
+import org.apache.calcite.test.CalciteAssert;
+import org.apache.calcite.test.schemata.hr.HrSchema;
+
+import org.junit.jupiter.api.Test;
+
+/**
+ * Unit tests for {@link org.apache.calcite.adapter.enumerable.EnumerableCombine}.
+ */
+class EnumerableCombineTest {
+
+  /**
+   * Test that executes two simple queries combined.
+   * Query 1: Select employee names from department 10
+   * Query 2: Select department names
+   *
+   * <p>The Combine operator returns results in a wide format where each query
+   * is a column (QUERY_0, QUERY_1, etc.) and each row contains a struct (map)
+   * for each query. Queries with fewer rows have null values for additional 
rows.
+   */
+  @Test void testCombineTwoQueries() {
+    tester(new HrSchema())
+        .withRel(
+            builder -> {
+              // Query 1: SELECT name FROM emps WHERE deptno = 10
+              builder.scan("s", "emps")
+                  .filter(
+                      builder.equals(
+                          builder.field("deptno"),
+                          builder.literal(10)))
+                  .project(builder.field("name"));
+
+              // Query 2: SELECT name FROM depts
+              builder.scan("s", "depts")
+                  .project(builder.field("name"));
+
+              // Combine both queries
+              return builder.combine(2).build();
+            })
+        .returnsOrdered(
+            "QUERY_0={name=Bill}; QUERY_1={name=Sales}",
+            "QUERY_0={name=Sebastian}; QUERY_1={name=Marketing}",
+            "QUERY_0={name=Theodore}; QUERY_1={name=HR}");
+  }
+
+  /**
+   * Test that executes two queries with different row counts.
+   * Query 1: Select all employee names (4 rows)
+   * Query 2: Select all department names (3 rows)
+   *
+   * <p>Since Query 2 returns fewer rows, QUERY_1 is null for the extra rows.
+   */
+  @Test void testCombineDifferentRowCounts() {
+    tester(new HrSchema())
+        .withRel(
+            builder -> {
+              // Query 1: SELECT name FROM emps (4 rows)
+              builder.scan("s", "emps")
+                  .project(builder.field("name"));
+
+              // Query 2: SELECT name FROM depts (3 rows)
+              builder.scan("s", "depts")
+                  .project(builder.field("name"));
+
+              // Combine both queries
+              return builder.combine(2).build();
+            })
+        .returnsUnordered(
+            "QUERY_0={name=Bill}; QUERY_1={name=Sales}",
+            "QUERY_0={name=Eric}; QUERY_1={name=Marketing}",
+            "QUERY_0={name=Sebastian}; QUERY_1={name=HR}",
+            "QUERY_0={name=Theodore}; QUERY_1=null");
+  }
+
+  /**
+   * Test that executes two queries with multiple columns each.
+   * Query 1: Select empid and name from employees in department 10
+   * Query 2: Select deptno and name from departments
+   */
+  @Test void testCombineMultipleColumns() {
+    tester(new HrSchema())
+        .withRel(
+            builder -> {
+              // Query 1: SELECT empid, name FROM emps WHERE deptno = 10
+              builder.scan("s", "emps")
+                  .filter(
+                      builder.equals(
+                          builder.field("deptno"),
+                          builder.literal(10)))
+                  .project(
+                      builder.field("empid"),
+                      builder.field("name"));
+
+              // Query 2: SELECT deptno, name FROM depts
+              builder.scan("s", "depts")
+                  .project(
+                      builder.field("deptno"),
+                      builder.field("name"));
+
+              // Combine both queries
+              return builder.combine(2).build();
+            })
+        .returnsUnordered(
+            "QUERY_0={empid=100, name=Bill}; QUERY_1={deptno=10, name=Sales}",
+            "QUERY_0={empid=150, name=Sebastian}; QUERY_1={deptno=30, 
name=Marketing}",
+            "QUERY_0={empid=110, name=Theodore}; QUERY_1={deptno=40, 
name=HR}");
+  }
+
+  /**
+   * Test that executes two queries returning different numbers of columns.
+   * Query 1: Select name from depts (1 column)
+   * Query 2: Select empid, name, deptno from emps where deptno = 10 (3 columns)
+   */
+  @Test void testCombineDifferentColumnCounts() {
+    tester(new HrSchema())
+        .withRel(
+            builder -> {
+              // Query 1: SELECT name FROM depts (1 column)
+              builder.scan("s", "depts")
+                  .project(builder.field("name"));
+
+              // Query 2: SELECT empid, name, deptno FROM emps WHERE deptno = 10 (3 columns)
+              builder.scan("s", "emps")
+                  .filter(
+                      builder.equals(
+                          builder.field("deptno"),
+                          builder.literal(10)))
+                  .project(
+                      builder.field("empid"),
+                      builder.field("name"),
+                      builder.field("deptno"));
+
+              // Combine both queries
+              return builder.combine(2).build();
+            })
+        .returnsUnordered(
+            "QUERY_0={name=Sales}; QUERY_1={empid=100, name=Bill, deptno=10}",
+            "QUERY_0={name=Marketing}; QUERY_1={empid=150, name=Sebastian, 
deptno=10}",
+            "QUERY_0={name=HR}; QUERY_1={empid=110, name=Theodore, 
deptno=10}");
+  }
+
+  private CalciteAssert.AssertThat tester(Object schema) {
+    return CalciteAssert.that()
+        .with(CalciteConnectionProperty.LEX, Lex.JAVA)
+        .withSchema("s", new ReflectiveSchema(schema));
+  }
+}
diff --git a/core/src/test/resources/org/apache/calcite/test/CombineRelOptRulesTest.xml b/core/src/test/resources/org/apache/calcite/test/CombineRelOptRulesTest.xml
new file mode 100644
index 0000000000..1ae0496621
--- /dev/null
+++ b/core/src/test/resources/org/apache/calcite/test/CombineRelOptRulesTest.xml
@@ -0,0 +1,371 @@
+<?xml version="1.0" ?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to you under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~ http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<Root>
+  <TestCase name="testNoSharedExpressionsDifferentFilters">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalProject(EMPNO=[$0])
+    LogicalFilter(condition=[=($7, 10)])
+      LogicalTableScan(table=[[scott, EMP]])
+  LogicalProject(ENAME=[$1])
+    LogicalFilter(condition=[=($2, 'CLERK')])
+      LogicalTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableProject(EMPNO=[$0])
+    EnumerableFilter(condition=[=($7, 10)])
+      EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(ENAME=[$1])
+    EnumerableFilter(condition=[=($2, 'CLERK')])
+      EnumerableTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testNoSharedExpressionsDifferentGroupByKeys">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{7}], CNT=[COUNT()])
+    LogicalTableScan(table=[[scott, EMP]])
+  LogicalAggregate(group=[{2}], CNT=[COUNT()])
+    LogicalTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableAggregate(group=[{7}], CNT=[COUNT()])
+    EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableAggregate(group=[{2}], CNT=[COUNT()])
+    EnumerableTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testNoSharedExpressionsDifferentJoinTypes">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalProject(EMPNO=[$0], DNAME=[$9])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+  LogicalProject(EMPNO=[$0], DNAME=[$9])
+    LogicalJoin(condition=[=($7, $8)], joinType=[full])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableProject(EMPNO=[$3], DNAME=[$1])
+    EnumerableHashJoin(condition=[=($0, $10)], joinType=[inner])
+      EnumerableTableScan(table=[[scott, DEPT]])
+      EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(EMPNO=[$0], DNAME=[$9])
+    EnumerableHashJoin(condition=[=($7, $8)], joinType=[full])
+      EnumerableTableScan(table=[[scott, EMP]])
+      EnumerableTableScan(table=[[scott, DEPT]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testNoSharedExpressionsDifferentJoins">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalProject(ENAME=[$1], DNAME=[$9])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+  LogicalProject(ENAME=[$1], GRADE=[$8])
+    LogicalJoin(condition=[AND(>=($5, $9), <=($5, $10))], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, SALGRADE]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableProject(ENAME=[$4], DNAME=[$1])
+    EnumerableHashJoin(condition=[=($0, $10)], joinType=[inner])
+      EnumerableTableScan(table=[[scott, DEPT]])
+      EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(ENAME=[$1], GRADE=[$8])
+    EnumerableNestedLoopJoin(condition=[AND(>=($5, $9), <=($5, $10))], joinType=[inner])
+      EnumerableTableScan(table=[[scott, EMP]])
+      EnumerableTableScan(table=[[scott, SALGRADE]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testNoSharedExpressionsDifferentTables">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalProject(EMPNO=[$0], ENAME=[$1], SAL=[$5])
+    LogicalTableScan(table=[[scott, EMP]])
+  LogicalTableScan(table=[[scott, SALGRADE]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableProject(EMPNO=[$0], ENAME=[$1], SAL=[$5])
+    EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableTableScan(table=[[scott, SALGRADE]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedAggregationBase">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{7}], TOTAL_SAL=[SUM($5)])
+    LogicalTableScan(table=[[scott, EMP]])
+  LogicalFilter(condition=[>($1, 10000)])
+    LogicalAggregate(group=[{7}], TOTAL_SAL=[SUM($5)])
+      LogicalTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+    EnumerableAggregate(group=[{7}], TOTAL_SAL=[SUM($5)])
+      EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableFilter(condition=[>($1, 10000)])
+    EnumerableInterpreter
+      BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedComplexJoin">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{9, 11}], EMP_COUNT=[COUNT()])
+    LogicalJoin(condition=[AND(>=($5, $12), <=($5, $13))], joinType=[inner])
+      LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+        LogicalTableScan(table=[[scott, EMP]])
+        LogicalTableScan(table=[[scott, DEPT]])
+      LogicalTableScan(table=[[scott, SALGRADE]])
+  LogicalAggregate(group=[{9, 11}], AVG_SAL=[AVG($5)])
+    LogicalJoin(condition=[AND(>=($5, $12), <=($5, $13))], joinType=[inner])
+      LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+        LogicalTableScan(table=[[scott, EMP]])
+        LogicalTableScan(table=[[scott, DEPT]])
+      LogicalTableScan(table=[[scott, SALGRADE]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableAggregate(group=[{9, 11}], EMP_COUNT=[COUNT()])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableNestedLoopJoin(condition=[AND(>=($5, $12), <=($5, $13))], joinType=[inner])
+        EnumerableProject(EMPNO=[$3], ENAME=[$4], JOB=[$5], MGR=[$6], HIREDATE=[$7], SAL=[$8], COMM=[$9], DEPTNO=[$10], DEPTNO0=[$0], DNAME=[$1], LOC=[$2])
+          EnumerableHashJoin(condition=[=($0, $10)], joinType=[inner])
+            EnumerableTableScan(table=[[scott, DEPT]])
+            EnumerableTableScan(table=[[scott, EMP]])
+        EnumerableTableScan(table=[[scott, SALGRADE]])
+  EnumerableProject(DNAME=[$0], GRADE=[$1], AVG_SAL=[CAST(/(CAST(CASE(=($3, 0), null:DECIMAL(19, 2), $2)):DECIMAL(7, 2), $3)):DECIMAL(7, 2)])
+    EnumerableAggregate(group=[{9, 11}], agg#0=[$SUM0($5)], agg#1=[COUNT($5)])
+      EnumerableInterpreter
+        BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedFilter">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalProject(EMPNO=[$0], SAL=[$5])
+    LogicalFilter(condition=[AND(>($5, 2000), =($7, 10))])
+      LogicalTableScan(table=[[scott, EMP]])
+  LogicalProject(ENAME=[$1], JOB=[$2])
+    LogicalFilter(condition=[AND(>($5, 2000), =($7, 10))])
+      LogicalTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableProject(EMPNO=[$0], SAL=[$5])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableFilter(condition=[AND(>($5, 2000), =($7, 10))])
+        EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(ENAME=[$1], JOB=[$2])
+    EnumerableInterpreter
+      BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedFilterJoinAggregate">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{9}], HIGH_EARNER_CNT=[COUNT()])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalFilter(condition=[>($5, 2000)])
+        LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+  LogicalAggregate(group=[{9}], AVG_HIGH_SAL=[AVG($5)])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalFilter(condition=[>($5, 2000)])
+        LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableAggregate(group=[{9}], HIGH_EARNER_CNT=[COUNT()])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableProject(EMPNO=[$3], ENAME=[$4], JOB=[$5], MGR=[$6], HIREDATE=[$7], SAL=[$8], COMM=[$9], DEPTNO=[$10], DEPTNO0=[$0], DNAME=[$1], LOC=[$2])
+        EnumerableHashJoin(condition=[=($0, $10)], joinType=[inner])
+          EnumerableTableScan(table=[[scott, DEPT]])
+          EnumerableFilter(condition=[>($5, 2000)])
+            EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(DNAME=[$0], AVG_HIGH_SAL=[CAST(/(CAST(CASE(=($2, 0), null:DECIMAL(19, 2), $1)):DECIMAL(7, 2), $2)):DECIMAL(7, 2)])
+    EnumerableAggregate(group=[{9}], agg#0=[$SUM0($5)], agg#1=[COUNT($5)])
+      EnumerableInterpreter
+        BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedFilterWithDifferentProjections">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{}], CNT=[COUNT()])
+    LogicalFilter(condition=[SEARCH($5, Sarg[[1000..3000]])])
+      LogicalTableScan(table=[[scott, EMP]])
+  LogicalAggregate(group=[{}], AVG_SAL=[AVG($5)])
+    LogicalFilter(condition=[SEARCH($5, Sarg[[1000..3000]])])
+      LogicalTableScan(table=[[scott, EMP]])
+  LogicalProject(ENAME=[$1], SAL=[$5])
+    LogicalFilter(condition=[SEARCH($5, Sarg[[1000..3000]])])
+      LogicalTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableAggregate(group=[{}], CNT=[COUNT()])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableFilter(condition=[SEARCH($5, Sarg[[1000..3000]])])
+        EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(AVG_SAL=[CAST(/(CAST(CASE(=($1, 0), null:DECIMAL(19, 2), $0)):DECIMAL(7, 2), $1)):DECIMAL(7, 2)])
+    EnumerableAggregate(group=[{}], agg#0=[$SUM0($5)], agg#1=[COUNT($5)])
+      EnumerableInterpreter
+        BindableTableScan(table=[[TEMP, spool_0]])
+  EnumerableProject(ENAME=[$1], SAL=[$5])
+    EnumerableInterpreter
+      BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedJoin">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalProject(EMPNO=[$0], DNAME=[$9])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+  LogicalProject(ENAME=[$1], LOC=[$10])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableProject(EMPNO=[$0], DNAME=[$9])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableProject(EMPNO=[$3], ENAME=[$4], JOB=[$5], MGR=[$6], HIREDATE=[$7], SAL=[$8], COMM=[$9], DEPTNO=[$10], DEPTNO0=[$0], DNAME=[$1], LOC=[$2])
+        EnumerableHashJoin(condition=[=($0, $10)], joinType=[inner])
+          EnumerableTableScan(table=[[scott, DEPT]])
+          EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(ENAME=[$1], LOC=[$10])
+    EnumerableInterpreter
+      BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedJoinThenAggregation">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{9}], TOTAL_SAL=[SUM($5)])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+  LogicalAggregate(group=[{10}], EMP_CNT=[COUNT()])
+    LogicalJoin(condition=[=($7, $8)], joinType=[inner])
+      LogicalTableScan(table=[[scott, EMP]])
+      LogicalTableScan(table=[[scott, DEPT]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableAggregate(group=[{9}], TOTAL_SAL=[SUM($5)])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableProject(EMPNO=[$3], ENAME=[$4], JOB=[$5], MGR=[$6], HIREDATE=[$7], SAL=[$8], COMM=[$9], DEPTNO=[$10], DEPTNO0=[$0], DNAME=[$1], LOC=[$2])
+        EnumerableHashJoin(condition=[=($0, $10)], joinType=[inner])
+          EnumerableTableScan(table=[[scott, DEPT]])
+          EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableAggregate(group=[{10}], EMP_CNT=[COUNT()])
+    EnumerableInterpreter
+      BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+  <TestCase name="testSharedMultipleFilters">
+    <Resource name="planBefore">
+      <![CDATA[
+Combine
+  LogicalAggregate(group=[{}], CNT=[COUNT()])
+    LogicalFilter(condition=[=($7, 20)])
+      LogicalFilter(condition=[>($5, 1500)])
+        LogicalTableScan(table=[[scott, EMP]])
+  LogicalProject(EMPNO=[$0], ENAME=[$1])
+    LogicalFilter(condition=[=($7, 20)])
+      LogicalFilter(condition=[>($5, 1500)])
+        LogicalTableScan(table=[[scott, EMP]])
+]]>
+    </Resource>
+    <Resource name="planAfter">
+      <![CDATA[
+EnumerableCombine
+  EnumerableAggregate(group=[{}], CNT=[COUNT()])
+    EnumerableTableSpool(readType=[LAZY], writeType=[LAZY], table=[[TEMP, spool_0]])
+      EnumerableFilter(condition=[AND(>($5, 1500), =($7, 20))])
+        EnumerableTableScan(table=[[scott, EMP]])
+  EnumerableProject(EMPNO=[$0], ENAME=[$1])
+    EnumerableInterpreter
+      BindableTableScan(table=[[TEMP, spool_0]])
+]]>
+    </Resource>
+  </TestCase>
+</Root>
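
The planAfter traces above show the effect of CombineSimpleEquivalenceRule: the first copy of a duplicated input is wrapped in an EnumerableTableSpool that writes to a TEMP table (spool_0), and the remaining copies are replaced by scans of that spool. A minimal sketch of driving that rewrite with a heuristic planner follows; the rule handle CombineSimpleEquivalenceRule.Config.DEFAULT.toRule() is assumed from the common Calcite rule pattern and may not match the exact API added by this patch.

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.rules.CombineSimpleEquivalenceRule;

public final class CombineSharingSketch {
  private CombineSharingSketch() {}

  /** Rewrites a Combine so trivially equivalent inputs are computed once.
   * CombineSimpleEquivalenceRule.Config.DEFAULT.toRule() is an assumed handle;
   * only the rule class itself comes from this patch. */
  public static RelNode shareEquivalentInputs(RelNode combine) {
    HepProgram program = new HepProgramBuilder()
        .addRuleInstance(CombineSimpleEquivalenceRule.Config.DEFAULT.toRule())
        .build();
    HepPlanner planner = new HepPlanner(program);
    planner.setRoot(combine);
    // Equivalent subtrees now read from the shared TEMP spool, as in planAfter.
    return planner.findBestExp();
  }
}

In the fixtures the rewritten plan is then implemented in the enumerable convention, which is why the shared branch reappears as a BindableTableScan over [TEMP, spool_0].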
