This is an automated email from the ASF dual-hosted git repository. wenchen pushed a commit to branch branch-3.5 in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.5 by this push: new eb123a127eba [SPARK-46941][SQL][3.5] Can't insert window group limit node for top-k computation if contains SizeBasedWindowFunction eb123a127eba is described below commit eb123a127ebabd9c146ec3e5955745e6142bc3eb Author: zml1206 <zhuml1...@gmail.com> AuthorDate: Wed Jul 16 17:06:51 2025 +0800 [SPARK-46941][SQL][3.5] Can't insert window group limit node for top-k computation if contains SizeBasedWindowFunction ### What changes were proposed in this pull request? This PR backports #44980 to branch-3.5. Don't insert a window group limit node for top-k computation if it contains a `SizeBasedWindowFunction`. ### Why are the changes needed? Bug fix. Inserting a window group limit node for a top-k computation that contains a `SizeBasedWindowFunction` will cause wrong results from the `SizeBasedWindowFunction`. ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? New UT. Before this PR, the UT will not pass. ### Was this patch authored or co-authored using generative AI tooling? No. Closes #51422 from zml1206/SPARK-46941-3.5. 
Authored-by: zml1206 <zhuml1...@gmail.com> Signed-off-by: Wenchen Fan <wenc...@databricks.com> --- .../catalyst/optimizer/InferWindowGroupLimit.scala | 11 +++++---- .../optimizer/InferWindowGroupLimitSuite.scala | 18 ++++++++++++++- .../spark/sql/DataFrameWindowFunctionsSuite.scala | 27 ++++++++++++++++++++++ 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala index 04204c6a2e10..f2e99721e926 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimit.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.catalyst.optimizer -import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, CurrentRow, DenseRank, EqualTo, Expression, GreaterThan, GreaterThanOrEqual, IntegerLiteral, LessThan, LessThanOrEqual, Literal, NamedExpression, PredicateHelper, Rank, RowFrame, RowNumber, SpecifiedWindowFrame, UnboundedPreceding, WindowExpression, WindowSpecDefinition} +import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, CurrentRow, DenseRank, EqualTo, Expression, GreaterThan, GreaterThanOrEqual, IntegerLiteral, LessThan, LessThanOrEqual, Literal, NamedExpression, PredicateHelper, Rank, RowFrame, RowNumber, SizeBasedWindowFunction, SpecifiedWindowFrame, UnboundedPreceding, WindowExpression, WindowSpecDefinition} import org.apache.spark.sql.catalyst.plans.logical.{Filter, Limit, LocalRelation, LogicalPlan, Window, WindowGroupLimit} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.catalyst.trees.TreePattern.{FILTER, WINDOW} @@ -53,13 +53,14 @@ object InferWindowGroupLimit extends Rule[LogicalPlan] with PredicateHelper { } /** - * All window expressions should use the same expanding window, so 
that - * we can safely do the early stop. + * All window expressions should use the same expanding window and do not contains + * `SizeBasedWindowFunction`, so that we can safely do the early stop. */ private def isExpandingWindow( windowExpression: NamedExpression): Boolean = windowExpression match { - case Alias(WindowExpression(_, WindowSpecDefinition(_, _, - SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow))), _) => true + case Alias(WindowExpression(windowFunction, WindowSpecDefinition(_, _, + SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow))), _) + if !windowFunction.isInstanceOf[SizeBasedWindowFunction] => true case _ => false } diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala index 5ffb45084184..cfd2146d868c 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/InferWindowGroupLimitSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.optimizer import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ -import org.apache.spark.sql.catalyst.expressions.{CurrentRow, DenseRank, Literal, NthValue, NTile, Rank, RowFrame, RowNumber, SpecifiedWindowFrame, UnboundedPreceding} +import org.apache.spark.sql.catalyst.expressions.{CurrentRow, DenseRank, Literal, NthValue, NTile, PercentRank, Rank, RowFrame, RowNumber, SpecifiedWindowFrame, UnboundedPreceding} import org.apache.spark.sql.catalyst.plans.PlanTest import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan} import org.apache.spark.sql.catalyst.rules.RuleExecutor @@ -338,4 +338,20 @@ class InferWindowGroupLimitSuite extends PlanTest { 
WithoutOptimize.execute(correctAnswer1.analyze)) } } + + test("SPARK-46941: Can't Insert window group limit node for top-k computation if contains " + + "SizeBasedWindowFunction") { + val originalQuery = + testRelation + .select(a, b, c, + windowExpr(Rank(c :: Nil), + windowSpec(a :: Nil, c.desc :: Nil, windowFrame)).as("rank"), + windowExpr(PercentRank(c :: Nil), + windowSpec(a :: Nil, c.desc :: Nil, windowFrame)).as("percent_rank")) + .where(Symbol("rank") < 2) + + comparePlans( + Optimize.execute(originalQuery.analyze), + WithoutOptimize.execute(originalQuery.analyze)) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala index 47a311c71d55..a1d5d5793386 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala @@ -1637,4 +1637,31 @@ class DataFrameWindowFunctionsSuite extends QueryTest } } } + + test("SPARK-46941: Can't insert window group limit node for top-k computation if contains " + + "SizeBasedWindowFunction") { + val df = Seq( + (1, "Dave", 1, 2020), + (2, "Mark", 2, 2020), + (3, "Amy", 3, 2020), + (4, "Dave", 1, 2021), + (5, "Mark", 2, 2021), + (6, "Amy", 3, 2021), + (7, "John", 4, 2021)).toDF("id", "name", "score", "year") + + val window = Window.partitionBy($"year").orderBy($"score".desc) + + Seq(-1, 100).foreach { threshold => + withSQLConf(SQLConf.WINDOW_GROUP_LIMIT_THRESHOLD.key -> threshold.toString) { + val df2 = df + .withColumn("rank", rank().over(window)) + .withColumn("percent_rank", percent_rank().over(window)) + .sort($"year") + checkAnswer(df2.filter("rank=2"), Seq( + Row(2, "Mark", 2, 2020, 2, 0.5), + Row(6, "Amy", 3, 2021, 2, 0.3333333333333333) + )) + } + } + } } --------------------------------------------------------------------- To unsubscribe, e-mail: 
commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org