This is an automated email from the ASF dual-hosted git repository.
gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new c426b7285b58 [SPARK-46941][SQL] Can't insert window group limit node
for top-k computation if contains SizeBasedWindowFunction
c426b7285b58 is described below
commit c426b7285b588924eaa8325cb83c868389e94bc3
Author: zml1206 <[email protected]>
AuthorDate: Fri Feb 2 12:18:49 2024 +0900
[SPARK-46941][SQL] Can't insert window group limit node for top-k
computation if contains SizeBasedWindowFunction
### What changes were proposed in this pull request?
Don't insert a window group limit node for top-k computation if the window
expressions contain a `SizeBasedWindowFunction`.
### Why are the changes needed?
Bug fix. Inserting a window group limit node for top-k computation that
contains a `SizeBasedWindowFunction` will cause wrong results for the
`SizeBasedWindowFunction`.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
New UT. Before this PR, the UT would not pass.
### Was this patch authored or co-authored using generative AI tooling?
No.
Closes #44980 from zml1206/SPARK-46941.
Authored-by: zml1206 <[email protected]>
Signed-off-by: Hyukjin Kwon <[email protected]>
---
.../catalyst/optimizer/InferWindowGroupLimit.scala | 11 +++++----
.../optimizer/InferWindowGroupLimitSuite.scala | 18 ++++++++++++++-
.../spark/sql/DataFrameWindowFunctionsSuite.scala | 27 ++++++++++++++++++++++
3 files changed, 50 insertions(+), 6 deletions(-)
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala
index 04204c6a2e10..f2e99721e926 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala
@@ -17,7 +17,7 @@
package org.apache.spark.sql.catalyst.optimizer
-import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute,
CurrentRow, DenseRank, EqualTo, Expression, GreaterThan, GreaterThanOrEqual,
IntegerLiteral, LessThan, LessThanOrEqual, Literal, NamedExpression,
PredicateHelper, Rank, RowFrame, RowNumber, SpecifiedWindowFrame,
UnboundedPreceding, WindowExpression, WindowSpecDefinition}
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute,
CurrentRow, DenseRank, EqualTo, Expression, GreaterThan, GreaterThanOrEqual,
IntegerLiteral, LessThan, LessThanOrEqual, Literal, NamedExpression,
PredicateHelper, Rank, RowFrame, RowNumber, SizeBasedWindowFunction,
SpecifiedWindowFrame, UnboundedPreceding, WindowExpression,
WindowSpecDefinition}
import org.apache.spark.sql.catalyst.plans.logical.{Filter, Limit,
LocalRelation, LogicalPlan, Window, WindowGroupLimit}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreePattern.{FILTER, WINDOW}
@@ -53,13 +53,14 @@ object InferWindowGroupLimit extends Rule[LogicalPlan] with
PredicateHelper {
}
/**
- * All window expressions should use the same expanding window, so that
- * we can safely do the early stop.
+ * All window expressions should use the same expanding window and do not
contains
+ * `SizeBasedWindowFunction`, so that we can safely do the early stop.
*/
private def isExpandingWindow(
windowExpression: NamedExpression): Boolean = windowExpression match {
- case Alias(WindowExpression(_, WindowSpecDefinition(_, _,
- SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow))), _) =>
true
+ case Alias(WindowExpression(windowFunction, WindowSpecDefinition(_, _,
+ SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow))), _)
+ if !windowFunction.isInstanceOf[SizeBasedWindowFunction] => true
case _ => false
}
diff --git
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala
index 3b185adabc3f..5aa7a27f65fb 100644
---
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala
+++
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
-import org.apache.spark.sql.catalyst.expressions.{CurrentRow, DenseRank,
Literal, NthValue, NTile, Rank, RowFrame, RowNumber, SpecifiedWindowFrame,
UnboundedPreceding}
+import org.apache.spark.sql.catalyst.expressions.{CurrentRow, DenseRank,
Literal, NthValue, NTile, PercentRank, Rank, RowFrame, RowNumber,
SpecifiedWindowFrame, UnboundedPreceding}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
@@ -338,4 +338,20 @@ class InferWindowGroupLimitSuite extends PlanTest {
WithoutOptimize.execute(correctAnswer1.analyze))
}
}
+
+ test("SPARK-46941: Can't Insert window group limit node for top-k
computation if contains " +
+ "SizeBasedWindowFunction") {
+ val originalQuery =
+ testRelation
+ .select(a, b, c,
+ windowExpr(Rank(c :: Nil),
+ windowSpec(a :: Nil, c.desc :: Nil, windowFrame)).as("rank"),
+ windowExpr(PercentRank(c :: Nil),
+ windowSpec(a :: Nil, c.desc :: Nil,
windowFrame)).as("percent_rank"))
+ .where(Symbol("rank") < 2)
+
+ comparePlans(
+ Optimize.execute(originalQuery.analyze),
+ WithoutOptimize.execute(originalQuery.analyze))
+ }
}
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
index 6969c4303e01..4c852711451c 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
@@ -1640,4 +1640,31 @@ class DataFrameWindowFunctionsSuite extends QueryTest
}
}
}
+
+ test("SPARK-46941: Can't insert window group limit node for top-k
computation if contains " +
+ "SizeBasedWindowFunction") {
+ val df = Seq(
+ (1, "Dave", 1, 2020),
+ (2, "Mark", 2, 2020),
+ (3, "Amy", 3, 2020),
+ (4, "Dave", 1, 2021),
+ (5, "Mark", 2, 2021),
+ (6, "Amy", 3, 2021),
+ (7, "John", 4, 2021)).toDF("id", "name", "score", "year")
+
+ val window = Window.partitionBy($"year").orderBy($"score".desc)
+
+ Seq(-1, 100).foreach { threshold =>
+ withSQLConf(SQLConf.WINDOW_GROUP_LIMIT_THRESHOLD.key ->
threshold.toString) {
+ val df2 = df
+ .withColumn("rank", rank().over(window))
+ .withColumn("percent_rank", percent_rank().over(window))
+ .sort($"year")
+ checkAnswer(df2.filter("rank=2"), Seq(
+ Row(2, "Mark", 2, 2020, 2, 0.5),
+ Row(6, "Amy", 3, 2021, 2, 0.3333333333333333)
+ ))
+ }
+ }
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]