[ 
https://issues.apache.org/jira/browse/HUDI-5293?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17654549#comment-17654549
 ] 

Jonathan Vexler edited comment on HUDI-5293 at 1/4/23 4:40 PM:
---------------------------------------------------------------

It seems this was fixed by HUDI-5294. I ran the following steps in
spark-sql on both master and 0.12.1: they failed on 0.12.1 and succeeded on master.

 
{code:java}
create table test_table2 (
    id int,
    dt string,
    name string,
    price double,
    ts long
) using hudi
tblproperties (
    primaryKey = 'id',
    type = 'cow',
    preCombineField = 'ts',
    'hoodie.datasource.meta.sync.enable' = 'false',
    'hoodie.datasource.hive_sync.enable' = 'false',
    'hoodie.schema.on.read.enable' = 'true'
)
partitioned by (dt)
location '/tmp/schema_evo/test_table2';
insert into test_table2 values
(1, 'a1', 10, 100, "2021-01-05"),
(2, 'a2', 20, 2000, "2021-01-06"),
(3, 'a3', 30, 3000, "2021-01-07");
set hoodie.datasource.write.reconcile.schema=true;
insert into test_table2 values
(4, 'a4', 14, 104, "2021-01-05"),
(5, 'a5', 25, 2050, "2021-01-06"),
(6, 'a6', 36, 3060, "2021-01-07");
{code}




 


was (Author: JIRAUSER295101):
It seems this was fixed by HUDI-5294. I ran the following steps in
spark-sql on both master and 0.12.1: they failed on 0.12.1 and succeeded on master.

```
create table test_table2 (
    id int,
    dt string,
    name string,
    price double,
    ts long
) using hudi
tblproperties (
    primaryKey = 'id',
    type = 'cow',
    preCombineField = 'ts',
    'hoodie.datasource.meta.sync.enable' = 'false',
    'hoodie.datasource.hive_sync.enable' = 'false',
    'hoodie.schema.on.read.enable' = 'true'
)
partitioned by (dt)
location '/tmp/schema_evo/test_table2';

insert into test_table2 values
(1, 'a1', 10, 100, "2021-01-05"),
(2, 'a2', 20, 2000, "2021-01-06"),
(3, 'a3', 30, 3000, "2021-01-07");

set hoodie.datasource.write.reconcile.schema=true;

insert into test_table2 values
(4, 'a4', 14, 104, "2021-01-05"),
(5, 'a5', 25, 2050, "2021-01-06"),
(6, 'a6', 36, 3060, "2021-01-07");
```

 

> Schema on read + reconcile schema fails w/ 0.12.1
> -------------------------------------------------
>
>                 Key: HUDI-5293
>                 URL: https://issues.apache.org/jira/browse/HUDI-5293
>             Project: Apache Hudi
>          Issue Type: Improvement
>          Components: writer-core
>            Reporter: sivabalan narayanan
>            Assignee: Jonathan Vexler
>            Priority: Major
>              Labels: pull-request-available
>             Fix For: 0.13.0
>
>
> if I do schema on read on commit1 and then schema on read + reconcile schema 
> for 2nd batch, it fails w/ 
> {code:java}
> warning: there was one deprecation warning; re-run with -deprecation for 
> details
> 22/11/28 16:44:26 ERROR BaseSparkCommitActionExecutor: Error upserting 
> bucketType UPDATE for partition :2
> java.lang.IllegalArgumentException: cannot modify hudi meta col: 
> _hoodie_commit_time
>       at 
> org.apache.hudi.internal.schema.action.TableChange$BaseColumnChange.checkColModifyIsLegal(TableChange.java:157)
>       at 
> org.apache.hudi.internal.schema.action.TableChanges$ColumnAddChange.addColumns(TableChanges.java:314)
>       at 
> org.apache.hudi.internal.schema.utils.AvroSchemaEvolutionUtils.lambda$reconcileSchema$5(AvroSchemaEvolutionUtils.java:92)
>       at 
> java.util.TreeMap$EntrySpliterator.forEachRemaining(TreeMap.java:2969)
>       at 
> java.util.stream.ReferencePipeline$Head.forEach(ReferencePipeline.java:580)
>       at 
> org.apache.hudi.internal.schema.utils.AvroSchemaEvolutionUtils.reconcileSchema(AvroSchemaEvolutionUtils.java:80)
>       at 
> org.apache.hudi.table.action.commit.HoodieMergeHelper.runMerge(HoodieMergeHelper.java:103)
>       at 
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdateInternal(BaseSparkCommitActionExecutor.java:358)
>       at 
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpdate(BaseSparkCommitActionExecutor.java:349)
>       at 
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.handleUpsertPartition(BaseSparkCommitActionExecutor.java:322)
>       at 
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.lambda$mapPartitionsAsRDD$a3ab3c4$1(BaseSparkCommitActionExecutor.java:244)
>       at 
> org.apache.spark.api.java.JavaRDDLike$$anonfun$mapPartitionsWithIndex$1.apply(JavaRDDLike.scala:102)
>       at 
> org.apache.spark.api.java.JavaRDDLike$$anonfun$mapPartitionsWithIndex$1.apply(JavaRDDLike.scala:102)
>       at 
> org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1$$anonfun$apply$25.apply(RDD.scala:875)
>       at 
> org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1$$anonfun$apply$25.apply(RDD.scala:875)
>       at 
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
>       at 
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
>       at org.apache.spark.rdd.RDD$$anonfun$7.apply(RDD.scala:359)
>       at org.apache.spark.rdd.RDD$$anonfun$7.apply(RDD.scala:357)
>       at 
> org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1182)
>       at 
> org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1156)
>       at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:1091)
>       at 
> org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1156)
>       at 
> org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:882)
>       at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:357)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:308)
>       at 
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
>       at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>       at org.apache.spark.scheduler.Task.run(Task.scala:123)
>       at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
>       at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
>       at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>       at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>       at java.lang.Thread.run(Thread.java:748) {code}
>  
>  
>  
>  
>  
>  



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

Reply via email to