linliu-code commented on code in PR #17556:
URL: https://github.com/apache/hudi/pull/17556#discussion_r2662241719
##########
hudi-spark-datasource/hudi-spark3.2plus-common/src/main/scala/org/apache/spark/sql/hudi/analysis/HoodieSpark32PlusAnalysis.scala:
##########
@@ -111,7 +111,13 @@ case class HoodieSpark32PlusResolveReferences(spark:
SparkSession) extends Rule[
lazy val analyzer = spark.sessionState.analyzer
val targetTable = if (targetTableO.resolved) targetTableO else
analyzer.execute(targetTableO)
val sourceTable = if (sourceTableO.resolved) sourceTableO else
analyzer.execute(sourceTableO)
- val m = mO.asInstanceOf[MergeIntoTable].copy(targetTable = targetTable,
sourceTable = sourceTable)
+ val originalMergeInto = mO.asInstanceOf[MergeIntoTable]
Review Comment:
This fixes some flakiness: in the GH CI Java 17 and Java 11 test modules, the
child thread executes without JAVA_HOME being set and falls back to the
"default" Java libraries, which may differ from the expected ones and cause an
error like the following:
```
TestMergeIntoTable2:
*** RUN ABORTED ***
java.lang.NoSuchMethodError:
'org.apache.spark.sql.catalyst.plans.logical.MergeIntoTable
org.apache.spark.sql.catalyst.plans.logical.MergeIntoTable.copy(org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan,
org.apache.spark.sql.catalyst.expressions.Expression, scala.collection.Seq,
scala.collection.Seq)'
at
org.apache.spark.sql.hudi.analysis.HoodieSpark32PlusResolveReferences$$anonfun$apply$1.applyOrElse(HoodieSpark32PlusAnalysis.scala:114)
at
org.apache.spark.sql.hudi.analysis.HoodieSpark32PlusResolveReferences$$anonfun$apply$1.applyOrElse(HoodieSpark32PlusAnalysis.scala:58)
at
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
at
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
at
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
at
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
at
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
at
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
at
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:31)
at
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUp(AnalysisHelper.scala:111)
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]