lgbo-ustc commented on issue #7647:
URL:
https://github.com/apache/incubator-gluten/issues/7647#issuecomment-2437045925
Some classic examples:
```sql
0: jdbc:hive2://localhost:10000>
explain select
n_regionkey, n_nationkey, sum(n_regionkey), count(n_name)
from
tpch_pq.nation
group by n_regionkey, n_nationkey with cube order by n_regionkey,
n_nationkey;
AdaptiveSparkPlan isFinalPlan=false
+- Sort [n_regionkey#67L ASC NULLS FIRST, n_nationkey#68L ASC NULLS FIRST],
true, 0
+- Exchange rangepartitioning(n_regionkey#67L ASC NULLS FIRST,
n_nationkey#68L ASC NULLS FIRST, 5), ENSURE_REQUIREMENTS, [plan_id=384]
+- HashAggregate(keys=[n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L], functions=[sum(n_regionkey#2L), count(n_name#1)])
+- Exchange hashpartitioning(n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L, 5), ENSURE_REQUIREMENTS, [plan_id=381]
+- HashAggregate(keys=[n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L], functions=[partial_sum(n_regionkey#2L),
partial_count(n_name#1)])
+- Expand [[n_name#1, n_regionkey#2L, n_regionkey#2L,
n_nationkey#0L, 0], [n_name#1, n_regionkey#2L, n_regionkey#2L, null, 1],
[n_name#1, n_regionkey#2L, null, n_nationkey#0L, 2], [n_name#1, n_regionkey#2L,
null, null, 3]], [n_name#1, n_regionkey#2L, n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L]
+- Project [n_name#1, n_regionkey#2L, n_regionkey#2L,
n_nationkey#0L]
+- FileScan parquet
tpch_pq.nation[n_nationkey#0L,n_name#1,n_regionkey#2L]
```
```sql
0: jdbc:hive2://localhost:10000> explain select * from (select n_regionkey,
n_nationkey, sum(n_regionkey), count(n_name) from tpch_pq.nation group by
n_regionkey, n_nationkey with cube order by n_regionkey, n_nationkey ) where
n_nationkey = 212;
AdaptiveSparkPlan isFinalPlan=false
+- Sort [n_regionkey#67L ASC NULLS FIRST, n_nationkey#68L ASC NULLS FIRST],
true, 0
+- Exchange rangepartitioning(n_regionkey#67L ASC NULLS FIRST,
n_nationkey#68L ASC NULLS FIRST, 5), ENSURE_REQUIREMENTS, [plan_id=384]
+- HashAggregate(keys=[n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L], functions=[sum(n_regionkey#2L), count(n_name#1)])
+- Exchange hashpartitioning(n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L, 5), ENSURE_REQUIREMENTS, [plan_id=381]
+- HashAggregate(keys=[n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L], functions=[partial_sum(n_regionkey#2L),
partial_count(n_name#1)])
+- Filter (isnotnull(n_nationkey#68L) AND (n_nationkey#68L =
cast(212 as bigint)))
+- Expand [[n_name#1, n_regionkey#2L, n_regionkey#2L,
n_nationkey#0L, 0], [n_name#1, n_regionkey#2L, n_regionkey#2L, null, 1],
[n_name#1, n_regionkey#2L, null, n_nationkey#0L, 2], [n_name#1, n_regionkey#2L,
null, null, 3]], [n_name#1, n_regionkey#2L, n_regionkey#67L, n_nationkey#68L,
spark_grouping_id#66L]
+- Project [n_name#1, n_regionkey#2L, n_regionkey#2L,
n_nationkey#0L]
+- FileScan parquet
tpch_pq.nation[n_nationkey#0L,n_name#1,n_regionkey#2L]
```
```sql
0: jdbc:hive2://localhost:10000> explain select n_regionkey, n_nationkey,
sum(distinct n_regionkey), count(distinct n_name) from tpch_pq.nation group by
n_regionkey, n_nationkey with cube order by n_regionkey, n_nationkey;
AdaptiveSparkPlan isFinalPlan=false
+- Sort [n_regionkey#107L ASC NULLS FIRST, n_nationkey#108L ASC NULLS
FIRST], true, 0
+- Exchange rangepartitioning(n_regionkey#107L ASC NULLS FIRST,
n_nationkey#108L ASC NULLS FIRST, 5), ENSURE_REQUIREMENTS, [plan_id=475]
+- HashAggregate(keys=[n_regionkey#107L, n_nationkey#108L,
spark_grouping_id#106L],
functions=[sum(spark_catalog.tpch_pq.nation.n_regionkey#110L),
count(spark_catalog.tpch_pq.nation.n_name#111)])
+- Exchange hashpartitioning(n_regionkey#107L, n_nationkey#108L,
spark_grouping_id#106L, 5), ENSURE_REQUIREMENTS, [plan_id=472]
+- HashAggregate(keys=[n_regionkey#107L, n_nationkey#108L,
spark_grouping_id#106L],
functions=[partial_sum(spark_catalog.tpch_pq.nation.n_regionkey#110L) FILTER
(WHERE (gid#109 = 1)), partial_count(spark_catalog.tpch_pq.nation.n_name#111)
FILTER (WHERE (gid#109 = 2))])
+- HashAggregate(keys=[n_regionkey#107L, n_nationkey#108L,
spark_grouping_id#106L, spark_catalog.tpch_pq.nation.n_regionkey#110L,
spark_catalog.tpch_pq.nation.n_name#111, gid#109], functions=[])
+- Exchange hashpartitioning(n_regionkey#107L,
n_nationkey#108L, spark_grouping_id#106L,
spark_catalog.tpch_pq.nation.n_regionkey#110L,
spark_catalog.tpch_pq.nation.n_name#111, gid#109, 5), ENSURE_REQUIREMENTS,
[plan_id=468]
+- HashAggregate(keys=[n_regionkey#107L,
n_nationkey#108L, spark_grouping_id#106L,
spark_catalog.tpch_pq.nation.n_regionkey#110L,
spark_catalog.tpch_pq.nation.n_name#111, gid#109], functions=[])
+- Expand [[n_regionkey#107L, n_nationkey#108L,
spark_grouping_id#106L, n_regionkey#2L, null, 1], [n_regionkey#107L,
n_nationkey#108L, spark_grouping_id#106L, null, n_name#1, 2]],
[n_regionkey#107L, n_nationkey#108L, spark_grouping_id#106L,
spark_catalog.tpch_pq.nation.n_regionkey#110L,
spark_catalog.tpch_pq.nation.n_name#111, gid#109]
+- Expand [[n_name#1, n_regionkey#2L,
n_regionkey#2L, n_nationkey#0L, 0], [n_name#1, n_regionkey#2L, n_regionkey#2L,
null, 1], [n_name#1, n_regionkey#2L, null, n_nationkey#0L, 2], [n_name#1,
n_regionkey#2L, null, null, 3]], [n_name#1, n_regionkey#2L, n_regionkey#107L,
n_nationkey#108L, spark_grouping_id#106L]
+- Project [n_name#1, n_regionkey#2L,
n_regionkey#2L, n_nationkey#0L]
+- FileScan parquet
tpch_pq.nation[n_nationkey#0L,n_name#1,n_regionkey#2L]
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]