wForget commented on code in PR #43869:
URL: https://github.com/apache/spark/pull/43869#discussion_r1400666306


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/dynamicpruning/RowLevelOperationRuntimeGroupFiltering.scala:
##########
@@ -116,6 +117,7 @@ class 
RowLevelOperationRuntimeGroupFiltering(optimizeSubqueries: Rule[LogicalPla
       matchingRowsPlan: LogicalPlan,
       buildKeys: Seq[Attribute],
       pruningKeys: Seq[Attribute]): Expression = {
+    assert(buildKeys.nonEmpty && pruningKeys.nonEmpty)

Review Comment:
   Yes, an error occurred when rerunning the newly added test case after 
removing this change:
   
   ```
   [INTERNAL_ERROR] The Spark SQL phase optimization failed with an internal 
error. You hit a bug in Spark or the Spark plugins you use. Please, report this 
bug to the corresponding communities or vendors, and provide the full stack 
trace. SQLSTATE: XX000
   org.apache.spark.SparkException: [INTERNAL_ERROR] The Spark SQL phase 
optimization failed with an internal error. You hit a bug in Spark or the Spark 
plugins you use. Please, report this bug to the corresponding communities or 
vendors, and provide the full stack trace. SQLSTATE: XX000
        at 
org.apache.spark.SparkException$.internalError(SparkException.scala:107)
        at 
org.apache.spark.sql.execution.QueryExecution$.toInternalError(QueryExecution.scala:547)
        at 
org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:559)
        at 
org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:230)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
        at 
org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:229)
        at 
org.apache.spark.sql.execution.QueryExecution.optimizedPlan$lzycompute(QueryExecution.scala:159)
        at 
org.apache.spark.sql.execution.QueryExecution.optimizedPlan(QueryExecution.scala:155)
        at 
org.apache.spark.sql.execution.QueryExecution.assertOptimized(QueryExecution.scala:173)
        at 
org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:193)
        at 
org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:190)
        at 
org.apache.spark.sql.execution.QueryExecution.simpleString(QueryExecution.scala:249)
        at 
org.apache.spark.sql.execution.QueryExecution.org$apache$spark$sql$execution$QueryExecution$$explainString(QueryExecution.scala:295)
        at 
org.apache.spark.sql.execution.QueryExecution.explainString(QueryExecution.scala:263)
        at 
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:138)
        at 
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
        at 
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
        at 
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
        at 
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
        at 
org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:118)
        at 
org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:109)
        at 
org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:442)
        at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:83)
        at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:442)
        at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:33)
        at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
        at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
        at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:33)
        at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:33)
        at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:418)
        at 
org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:109)
        at 
org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:96)
        at 
org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:94)
        at org.apache.spark.sql.Dataset.<init>(Dataset.scala:221)
        at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:101)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
        at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:98)
        at 
org.apache.spark.sql.SparkSession.$anonfun$sql$4(SparkSession.scala:697)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
        at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:688)
        at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:719)
        at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:750)
        at 
org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$sql$1(SQLTestUtils.scala:233)
        at 
org.apache.spark.sql.connector.MergeIntoTableSuiteBase.$anonfun$new$3(MergeIntoTableSuiteBase.scala:48)
        ......
   Caused by: java.lang.AssertionError: assertion failed
        at scala.Predef$.assert(Predef.scala:264)
        at 
org.apache.spark.sql.execution.dynamicpruning.RowLevelOperationRuntimeGroupFiltering.org$apache$spark$sql$execution$dynamicpruning$RowLevelOperationRuntimeGroupFiltering$$buildDynamicPruningCond(RowLevelOperationRuntimeGroupFiltering.scala:120)
        at 
org.apache.spark.sql.execution.dynamicpruning.RowLevelOperationRuntimeGroupFiltering$$anonfun$apply$1$$anonfun$1.applyOrElse(RowLevelOperationRuntimeGroupFiltering.scala:71)
        at 
org.apache.spark.sql.execution.dynamicpruning.RowLevelOperationRuntimeGroupFiltering$$anonfun$apply$1$$anonfun$1.applyOrElse(RowLevelOperationRuntimeGroupFiltering.scala:58)
        
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to