This is an automated email from the ASF dual-hosted git repository.
hongze pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
new 045e33e421 [GLUTEN-7709][CH] Rule constructor simplifications (#7710)
045e33e421 is described below
commit 045e33e4213df6ea2c858cd3c9961605b75178bc
Author: Jiaan Geng <[email protected]>
AuthorDate: Wed Oct 30 10:50:01 2024 +0800
[GLUTEN-7709][CH] Rule constructor simplifications (#7710)
Closes #7709
---
.../org/apache/gluten/backendsapi/clickhouse/CHRuleApi.scala | 9 +++------
.../gluten/extension/CommonSubexpressionEliminateRule.scala | 5 +----
.../gluten/extension/RewriteDateTimestampComparisonRule.scala | 3 +--
.../apache/gluten/extension/RewriteToDateExpresstionRule.scala | 4 +---
4 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHRuleApi.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHRuleApi.scala
index 4323dc9558..91698d4cde 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHRuleApi.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHRuleApi.scala
@@ -53,12 +53,9 @@ private object CHRuleApi {
(spark, parserInterface) => new GlutenCacheFilesSqlParser(spark, parserInterface))
injector.injectParser(
(spark, parserInterface) => new GlutenClickhouseSqlParser(spark, parserInterface))
- injector.injectResolutionRule(
- spark => new RewriteToDateExpresstionRule(spark, spark.sessionState.conf))
- injector.injectResolutionRule(
- spark => new RewriteDateTimestampComparisonRule(spark, spark.sessionState.conf))
- injector.injectOptimizerRule(
- spark => new CommonSubexpressionEliminateRule(spark, spark.sessionState.conf))
+ injector.injectResolutionRule(spark => new RewriteToDateExpresstionRule(spark))
+ injector.injectResolutionRule(spark => new RewriteDateTimestampComparisonRule(spark))
+ injector.injectOptimizerRule(spark => new CommonSubexpressionEliminateRule(spark))
injector.injectOptimizerRule(spark => CHAggregateFunctionRewriteRule(spark))
injector.injectOptimizerRule(_ => CountDistinctWithoutExpand)
injector.injectOptimizerRule(_ => EqualToRewrite)
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/extension/CommonSubexpressionEliminateRule.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/extension/CommonSubexpressionEliminateRule.scala
index 52e278b3da..a944f55450 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/extension/CommonSubexpressionEliminateRule.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/extension/CommonSubexpressionEliminateRule.scala
@@ -24,7 +24,6 @@ import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, AggregateFunction}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
-import org.apache.spark.sql.internal.SQLConf
import scala.collection.mutable
@@ -33,9 +32,7 @@ import scala.collection.mutable
// 2. append two options to spark config
// --conf spark.sql.planChangeLog.level=error
// --conf spark.sql.planChangeLog.batches=all
-class CommonSubexpressionEliminateRule(session: SparkSession, conf: SQLConf)
- extends Rule[LogicalPlan]
- with Logging {
+class CommonSubexpressionEliminateRule(spark: SparkSession) extends Rule[LogicalPlan] with Logging {
private var lastPlan: LogicalPlan = null
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteDateTimestampComparisonRule.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteDateTimestampComparisonRule.scala
index ea92ddec2c..fa8a37ffa2 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteDateTimestampComparisonRule.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteDateTimestampComparisonRule.scala
@@ -23,7 +23,6 @@ import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
-import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
@@ -37,7 +36,7 @@ import org.apache.spark.unsafe.types.UTF8String
// This rule try to make the filter condition into integer comparison, which is more efficient.
// The above example will be rewritten into
// select * from table where to_unixtime('2023-11-02', 'yyyy-MM-dd') >= unix_timestamp
-class RewriteDateTimestampComparisonRule(session: SparkSession, conf: SQLConf)
+class RewriteDateTimestampComparisonRule(spark: SparkSession)
extends Rule[LogicalPlan]
with Logging {
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteToDateExpresstionRule.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteToDateExpresstionRule.scala
index 34d162d71f..6e84863304 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteToDateExpresstionRule.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/extension/RewriteToDateExpresstionRule.scala
@@ -37,9 +37,7 @@ import org.apache.spark.sql.types._
// Under ch backend, the StringType can be directly converted into DateType,
// and the functions `from_unixtime` and `unix_timestamp` can be optimized here.
// Optimized result is `to_date(stringType)`
-class RewriteToDateExpresstionRule(session: SparkSession, conf: SQLConf)
- extends Rule[LogicalPlan]
- with Logging {
+class RewriteToDateExpresstionRule(spark: SparkSession) extends Rule[LogicalPlan] with Logging {
override def apply(plan: LogicalPlan): LogicalPlan = {
if (
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]