[ https://issues.apache.org/jira/browse/HUDI-2259?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17391127#comment-17391127 ]

ASF GitHub Bot commented on HUDI-2259:
--------------------------------------

hudi-bot edited a comment on pull request #3377:
URL: https://github.com/apache/hudi/pull/3377#issuecomment-890478045


   <!--
   Meta data
   {
     "version" : 1,
     "metaDataEntries" : [ {
       "hash" : "bd1fca2aedb3a1e01096cb244a2d55a1e4d11048",
       "status" : "FAILURE",
       "url" : 
"https://dev.azure.com/apache-hudi-ci-org/785b6ef4-2f42-4a89-8f0e-5f0d7039a0cc/_build/results?buildId=1292";,
       "triggerID" : "bd1fca2aedb3a1e01096cb244a2d55a1e4d11048",
       "triggerType" : "PUSH"
     } ]
   }-->
   ## CI report:
   
   * bd1fca2aedb3a1e01096cb244a2d55a1e4d11048 Azure: [FAILURE](https://dev.azure.com/apache-hudi-ci-org/785b6ef4-2f42-4a89-8f0e-5f0d7039a0cc/_build/results?buildId=1292)
 
   
   <details>
   <summary>Bot commands</summary>
     @hudi-bot supports the following commands:
   
    - `@hudi-bot run travis` re-run the last Travis build
    - `@hudi-bot run azure` re-run the last Azure build
   </details>


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@hudi.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> Merge Into when source table with columnAliases throws exception
> ----------------------------------------------------------------
>
>                 Key: HUDI-2259
>                 URL: https://issues.apache.org/jira/browse/HUDI-2259
>             Project: Apache Hudi
>          Issue Type: Bug
>          Components: Spark Integration
>            Reporter: 董可伦
>            Assignee: 董可伦
>            Priority: Major
>              Labels: pull-request-available
>             Fix For: 0.9.0
>
>
> val tableName = "test_hudi_table"
> spark.sql(
>  s"""
>  | create table ${tableName} (
>  | id int,
>  | name string,
>  | price double,
>  | ts long
>  |) using hudi
>  | options (
>  | primaryKey = 'id',
>  | type = 'cow'
>  | )
>  | location '/tmp/${tableName}'
>  |""".stripMargin)
> spark.sql(
>  s"""
>  | merge into $tableName as t0
>  | using (
>  | select 1, 'a1', 12, 1003
>  | ) s0 (id,name,price,ts)
>  | on s0.id = t0.id
>  | when matched and id != 1 then update set *
>  | when matched and s0.id = 1 then delete
>  | when not matched then insert *
>  """.stripMargin)
> Exception in thread "main" org.apache.spark.sql.AnalysisException: Cannot 
> resolve 's0.id in (`s0.id` = `t0.id`), the input columns is: [id#4, name#5, 
> price#6, ts#7, _hoodie_commit_time#8, _hoodie_commit_seqno#9, 
> _hoodie_record_key#10, _hoodie_partition_path#11, _hoodie_file_name#12, 
> id#13, name#14, price#15, ts#16L];
>  at 
> org.apache.spark.sql.hudi.analysis.HoodieResolveReferences.org$apache$spark$sql$hudi$analysis$HoodieResolveReferences$$resolveExpressionFrom(HoodieAnalysis.scala:292)
>  at 
> org.apache.spark.sql.hudi.analysis.HoodieResolveReferences$$anonfun$apply$1.applyOrElse(HoodieAnalysis.scala:160)
>  at 
> org.apache.spark.sql.hudi.analysis.HoodieResolveReferences$$anonfun$apply$1.applyOrElse(HoodieAnalysis.scala:103)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
>  at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:69)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:89)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:86)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperatorsUp(AnalysisHelper.scala:86)
>  at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
>  
>  
>  
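
The failure above is specific to the columnAliases form of the source relation, `using (...) s0 (id,name,price,ts)`. As a hedged workaround sketch (assuming the bug is limited to the columnAliases path, as the issue title suggests), the same source rows can be aliased inside the SELECT itself so that `s0.id` stays resolvable in the merge condition; the first matched condition is qualified with `s0.` here for clarity, where the original reproduction left it unqualified:

// Workaround sketch: alias the source columns in the SELECT instead of
// attaching a columnAliases list to the subquery alias. The merge logic is
// otherwise the same as in the reproduction above.
spark.sql(
  s"""
     | merge into $tableName as t0
     | using (
     |   select 1 as id, 'a1' as name, 12 as price, 1003 as ts
     | ) s0
     | on s0.id = t0.id
     | when matched and s0.id != 1 then update set *
     | when matched and s0.id = 1 then delete
     | when not matched then insert *
     |""".stripMargin)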



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
