[ 
https://issues.apache.org/jira/browse/HUDI-8629?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17913028#comment-17913028
 ] 

Y Ethan Guo commented on HUDI-8629:
-----------------------------------

Error thrown is:
{code:java}
[INTERNAL_ERROR] Cannot find main error class '_LEGACY_ERROR_TEMP_2309'
org.apache.spark.SparkException: [INTERNAL_ERROR] Cannot find main error class 
'_LEGACY_ERROR_TEMP_2309'
    at org.apache.spark.SparkException$.internalError(SparkException.scala:92)
    at org.apache.spark.SparkException$.internalError(SparkException.scala:96)
    at 
org.apache.spark.ErrorClassesJsonReader.$anonfun$getMessageTemplate$1(ErrorClassesJSONReader.scala:68)
    at scala.collection.immutable.HashMap$HashMap1.getOrElse0(HashMap.scala:361)
    at 
scala.collection.immutable.HashMap$HashTrieMap.getOrElse0(HashMap.scala:594)
    at 
scala.collection.immutable.HashMap$HashTrieMap.getOrElse0(HashMap.scala:589)
    at scala.collection.immutable.HashMap.getOrElse(HashMap.scala:73)
    at 
org.apache.spark.ErrorClassesJsonReader.getMessageTemplate(ErrorClassesJSONReader.scala:68)
    at 
org.apache.spark.ErrorClassesJsonReader.getErrorMessage(ErrorClassesJSONReader.scala:47)
    at 
org.apache.spark.SparkThrowableHelper$.getMessage(SparkThrowableHelper.scala:53)
    at 
org.apache.spark.SparkThrowableHelper$.getMessage(SparkThrowableHelper.scala:40)
    at org.apache.spark.sql.AnalysisException.<init>(AnalysisException.scala:77)
    at 
org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:52)
    at 
org.apache.spark.sql.HoodieSpark35CatalystPlanUtils$.failAnalysisForMIT(HoodieSpark35CatalystPlanUtils.scala:80)
    at 
org.apache.spark.sql.hudi.analysis.HoodieSpark3ResolveReferences.$anonfun$resolveMergeExprOrFail$2(HoodieSpark3Analysis.scala:263)
    at 
org.apache.spark.sql.hudi.analysis.HoodieSpark3ResolveReferences.$anonfun$resolveMergeExprOrFail$2$adapted(HoodieSpark3Analysis.scala:258)
 {code}
This error is thrown from the following method:
{code:java}
private def resolveMergeExprOrFail(e: Expression, p: LogicalPlan): Expression = {
  try {
    val resolved = resolveExpressionByPlanChildren(e, p)
    resolved.references.filter(!_.resolved).foreach { a =>
      // Note: This will throw error only on unresolved attribute issues,
      // not other resolution errors like mismatched data types.
      val cols = p.inputSet.toSeq.map(_.sql).mkString(", ")
      // START: custom Hudi change from spark because spark 3.4 constructor is different for fail analysis
      sparkAdapter.getCatalystPlanUtils.failAnalysisForMIT(a, cols)
      // END: custom Hudi change
    }
    resolved
  } catch {
    case x: AnalysisException =>
      throw x
  }
} {code}

> MergeInto w/ Partial updates pulls in fields from source not in assignment 
> clause
> ---------------------------------------------------------------------------------
>
>                 Key: HUDI-8629
>                 URL: https://issues.apache.org/jira/browse/HUDI-8629
>             Project: Apache Hudi
>          Issue Type: Sub-task
>            Reporter: sivabalan narayanan
>            Assignee: Y Ethan Guo
>            Priority: Blocker
>             Fix For: 1.0.1
>
>         Attachments: image-2024-12-02-04-07-54-483.png
>
>
> TestPartialUpdateForMergeInto.Test partial update with MOR and Avro log 
> format  w/ some slight changes. 
>  
> spark.sql(s"set 
> ${HoodieWriteConfig.MERGE_SMALL_FILE_GROUP_CANDIDATES_LIMIT.key} = 0")
> spark.sql(s"set 
> ${DataSourceWriteOptions.ENABLE_MERGE_INTO_PARTIAL_UPDATES.key} = true")
> spark.sql(s"set ${HoodieStorageConfig.LOGFILE_DATA_BLOCK_FORMAT.key} = 
> $logDataBlockFormat")
> spark.sql(s"set ${HoodieReaderConfig.FILE_GROUP_READER_ENABLED.key} = false")
> // Create a table with five data fields
> spark.sql(
> s"""
> |create table $tableName (
> | id int,
> | name string,
> | price long,
> | _ts long,
> | description string
> |) using hudi
> |tblproperties(
> | type ='$tableType',
> | primaryKey = 'id',
> | preCombineField = '_ts'
> |)
> |location '$basePath'
> """.stripMargin)
> spark.sql(s"insert into $tableName values (1, 'a1', 10, 1000, 'a1: desc1')," +
> "(2, 'a2', 20, 1200, 'a2: desc2'), (3, 'a3', 30.0, 1250, 'a3: desc3')")
>  
>  
> spark.sql(
> s"""
> |merge into $tableName t0
> |using ( select 1 as id, 'a1' as name, 12 as price, 1001 as ts
> |union select 3 as id, 'a3' as name, 25 as price, 1260 as ts) s0
> |on t0.id = s0.id
> |when matched then update set price = s0.price, _ts = s0.ts
> |""".stripMargin)
>  
>  
> While executing this MergeInto statement, we modify the schema to be as 
> follows. 
> !image-2024-12-02-04-07-54-483.png!
>  
>  



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

Reply via email to