[ https://issues.apache.org/jira/browse/HUDI-8569?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Y Ethan Guo updated HUDI-8569:
------------------------------
Parent: HUDI-8722
Issue Type: Sub-task (was: Bug)
> Insert overwrite / update MOR with global index does not work
> -------------------------------------------------------------
>
> Key: HUDI-8569
> URL: https://issues.apache.org/jira/browse/HUDI-8569
> Project: Apache Hudi
> Issue Type: Sub-task
> Reporter: Davis Zhang
> Priority: Critical
> Fix For: 1.0.1
>
>
> HoodieException: Field at _hoodie_record_key is not present in StructType(StructField(c1,IntegerType,true),StructField(c3,StringType,true),StructField(ts,LongType,true),StructField(c2,IntegerType,true))
> 
> Found in Hudi 1.0 (git hash ae5833ea34afc8849722509d185f4e94dd3fb424), Spark 3.4. Repro test:
> ```
> test("Test Type Casting with Global Index for Primary Key and Partition Key
> Updates") {
> Seq("mor").foreach { tableType =>
> withRecordType()(withTempDir { tmp =>
> withSQLConf("hoodie.index.type" -> "GLOBAL_SIMPLE",
> "hoodie.simple.index.update.partition.path" -> "true") {
> val tableName = generateTableName
> // Create table with both primary key and partition key
> spark.sql(
> s"""
> |create table $tableName (|
> |c1 int,|
> |c2 int,|
> |c3 string,|
> |ts long|
> |) using hudi|
> |partitioned by (c2)|
> |location '${tmp.getCanonicalPath}/$tableName'|
> |tblproperties (|
> |type = '$tableType',|
> |primaryKey = 'c1',|
> |preCombineField = 'ts'|
> |)
> """.stripMargin)|
> // Test Case 1: Initial insert with double values
> spark.sql(
> s"""
> |insert into $tableName|
> |select|
> |cast(1.0 as double) as c1,|
> |cast(1.0 as double) as c2,|
> |'a' as c3,|
> |1000 as ts
> """.stripMargin)|
> // Verify initial insert
> checkAnswer(
> s"select c1, c2, c3 from $tableName")(
> Seq(1, 1, "a")
> )
> spark.sql(
> s"""
> |insert into $tableName|
> |select|
> |cast(1.1 as double) as c1,|
> |cast(2.2 as double) as c2,|
> |'a' as c3,|
> |1001 as ts
> """.stripMargin)
> // Verify partition key update
> checkAnswer(
> s"select c1, c2, c3 from $tableName")(
> Seq(1, 2, "a")
> )|
> // Test Case 3: Insert overwrite with double values
> spark.sql(
> s"""
> |insert overwrite table $tableName partition (c2)|
> |select|
> |cast(2.3 as double) as c1,|
> |cast(3.3 as double) as c2,|
> |'a' as c3,|
> |1003 as ts
> """.stripMargin)|
> // Additional verification: check complete table state with sorting
> checkAnswer(s"select c1, c2, c3 from $tableName order by c1, c2")(Seq(2, 3,
> "a"))
> }
> })
> }
> }
> ```
>
> Running the test produces the following error:
> ```
> Failed to upsert for commit time 20241123005837776
> org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit time 20241123005837776
> at org.apache.hudi.table.action.commit.BaseWriteHelper.write(BaseWriteHelper.java:80)
> at org.apache.hudi.table.action.deltacommit.SparkUpsertDeltaCommitActionExecutor.execute(SparkUpsertDeltaCommitActionExecutor.java:47)
> at org.apache.hudi.table.HoodieSparkMergeOnReadTable.upsert(HoodieSparkMergeOnReadTable.java:98)
> at org.apache.hudi.table.HoodieSparkMergeOnReadTable.upsert(HoodieSparkMergeOnReadTable.java:88)
> at org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:162)
> at org.apache.hudi.DataSourceUtils.doWriteOperation(DataSourceUtils.java:221)
> at org.apache.hudi.HoodieSparkSqlWriterInternal.liftedTree1$1(HoodieSparkSqlWriter.scala:484)
> at org.apache.hudi.HoodieSparkSqlWriterInternal.writeInternal(HoodieSparkSqlWriter.scala:482)
> at org.apache.hudi.HoodieSparkSqlWriterInternal.write(HoodieSparkSqlWriter.scala:180)
> at org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:118)
> at org.apache.spark.sql.hudi.command.InsertIntoHoodieTableCommand$.run(InsertIntoHoodieTableCommand.scala:100)
> at org.apache.spark.sql.hudi.command.InsertIntoHoodieTableCommand.run(InsertIntoHoodieTableCommand.scala:61)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:75)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:73)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:84)
> at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:98)
> at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:118)
> at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:195)
> at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:103)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:827)
> at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
> at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:98)
> at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:94)
> at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
> at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
> at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
> at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
> at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
> at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
> at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:488)
> at org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:94)
> at org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:81)
> at org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:79)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:219)
> at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:99)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:827)
> at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:96)
> at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:640)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:827)
> at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:630)
> at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:671)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$39(TestTableColumnTypeMismatch.scala:809)
> at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.withSQLConf(HoodieSparkSqlTestBase.scala:188)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$38(TestTableColumnTypeMismatch.scala:769)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$38$adapted(TestTableColumnTypeMismatch.scala:767)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.withTempDir(HoodieSparkSqlTestBase.scala:77)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$37(TestTableColumnTypeMismatch.scala:767)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.$anonfun$withRecordType$3(HoodieSparkSqlTestBase.scala:216)
> at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.withSQLConf(HoodieSparkSqlTestBase.scala:188)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.$anonfun$withRecordType$1(HoodieSparkSqlTestBase.scala:215)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.$anonfun$withRecordType$1$adapted(HoodieSparkSqlTestBase.scala:207)
> at scala.collection.immutable.List.foreach(List.scala:431)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.withRecordType(HoodieSparkSqlTestBase.scala:207)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$36(TestTableColumnTypeMismatch.scala:767)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$36$adapted(TestTableColumnTypeMismatch.scala:766)
> at scala.collection.immutable.List.foreach(List.scala:431)
> at org.apache.spark.sql.hudi.TestTableColumnTypeMismatch.$anonfun$new$35(TestTableColumnTypeMismatch.scala:766)
> at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.$anonfun$test$1(HoodieSparkSqlTestBase.scala:85)
> at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
> at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
> at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
> at org.scalatest.Transformer.apply(Transformer.scala:22)
> at org.scalatest.Transformer.apply(Transformer.scala:20)
> at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:189)
> at org.scalatest.TestSuite.withFixture(TestSuite.scala:196)
> at org.scalatest.TestSuite.withFixture$(TestSuite.scala:195)
> at org.scalatest.funsuite.AnyFunSuite.withFixture(AnyFunSuite.scala:1562)
> at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:187)
> at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:199)
> at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
> at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:199)
> at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:181)
> at org.scalatest.funsuite.AnyFunSuite.runTest(AnyFunSuite.scala:1562)
> at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:232)
> at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
> at scala.collection.immutable.List.foreach(List.scala:431)
> at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
> at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
> at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
> at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:232)
> at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:231)
> at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1562)
> at org.scalatest.Suite.run(Suite.scala:1112)
> at org.scalatest.Suite.run$(Suite.scala:1094)
> at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1562)
> at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:236)
> at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
> at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:236)
> at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:235)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.org$scalatest$BeforeAndAfterAll$$super$run(HoodieSparkSqlTestBase.scala:44)
> at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
> at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
> at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
> at org.apache.spark.sql.hudi.HoodieSparkSqlTestBase.run(HoodieSparkSqlTestBase.scala:44)
> at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
> at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1314)
> at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1308)
> at scala.collection.immutable.List.foreach(List.scala:431)
> at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1308)
> at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:993)
> at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:971)
> at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1474)
> at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:971)
> at org.scalatest.tools.Runner$.run(Runner.scala:798)
> at org.scalatest.tools.Runner.run(Runner.scala)
> at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.runScalaTest2or3(ScalaTestRunner.java:43)
> at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.main(ScalaTestRunner.java:26)
> Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 175.0 failed 1 times, most recent failure: Lost task 1.0 in stage 175.0 (TID 726) (10.0.0.72 executor driver): org.apache.hudi.exception.HoodieException: Field at _hoodie_record_key is not present in StructType(StructField(c1,IntegerType,true),StructField(c3,StringType,true),StructField(ts,LongType,true),StructField(c2,IntegerType,true))
> at org.apache.hudi.common.model.HoodieSparkRecord.getValue(HoodieSparkRecord.java:454)
> at org.apache.hudi.common.model.HoodieSparkRecord.convertToHoodieSparkRecord(HoodieSparkRecord.java:416)
> at org.apache.hudi.common.model.HoodieSparkRecord.convertToHoodieSparkRecord(HoodieSparkRecord.java:406)
> at org.apache.hudi.common.model.HoodieSparkRecord.wrapIntoHoodieRecordPayloadWithParams(HoodieSparkRecord.java:267)
> at org.apache.hudi.index.HoodieIndexUtils.mergeIncomingWithExistingRecord(HoodieIndexUtils.java:308)
> at org.apache.hudi.index.HoodieIndexUtils.lambda$mergeForPartitionUpdatesIfNeeded$8fea8a5b$1(HoodieIndexUtils.java:361)
> at org.apache.hudi.data.HoodieJavaRDD.lambda$flatMap$a6598fcb$1(HoodieJavaRDD.java:137)
> at org.apache.spark.api.java.JavaRDDLike.$anonfun$flatMap$1(JavaRDDLike.scala:125)
> at scala.collection.Iterator$$anon$11.nextCur(Iterator.scala:486)
> at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:492)
> at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:223)
> at org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:352)
> at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1552)
> at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1462)
> at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1526)
> at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1349)
> at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:375)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:326)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:328)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:328)
> at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:101)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
> at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
> at org.apache.spark.scheduler.Task.run(Task.scala:139)
> at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1529)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:750)
>
> Driver stacktrace:
> at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2785)
> at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2721)
> at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2720)
> at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
> at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2720)
> at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1206)
> at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1206)
> at scala.Option.foreach(Option.scala:407)
> at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1206)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2984)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2923)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2912)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
> at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:971)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2263)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2284)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2303)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2328)
> at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1019)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:405)
> at org.apache.spark.rdd.RDD.collect(RDD.scala:1018)
> at org.apache.spark.rdd.PairRDDFunctions.$anonfun$countByKey$1(PairRDDFunctions.scala:367)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:405)
> at org.apache.spark.rdd.PairRDDFunctions.countByKey(PairRDDFunctions.scala:367)
> at org.apache.spark.api.java.JavaPairRDD.countByKey(JavaPairRDD.scala:314)
> at org.apache.hudi.data.HoodieJavaPairRDD.countByKey(HoodieJavaPairRDD.java:105)
> at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.buildProfile(BaseSparkCommitActionExecutor.java:220)
> at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:186)
> at org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:90)
> at org.apache.hudi.table.action.commit.BaseWriteHelper.write(BaseWriteHelper.java:74)
> ... 111 more
> Caused by: org.apache.hudi.exception.HoodieException: Field at _hoodie_record_key is not present in StructType(StructField(c1,IntegerType,true),StructField(c3,StringType,true),StructField(ts,LongType,true),StructField(c2,IntegerType,true))
> at org.apache.hudi.common.model.HoodieSparkRecord.getValue(HoodieSparkRecord.java:454)
> at org.apache.hudi.common.model.HoodieSparkRecord.convertToHoodieSparkRecord(HoodieSparkRecord.java:416)
> at org.apache.hudi.common.model.HoodieSparkRecord.convertToHoodieSparkRecord(HoodieSparkRecord.java:406)
> at org.apache.hudi.common.model.HoodieSparkRecord.wrapIntoHoodieRecordPayloadWithParams(HoodieSparkRecord.java:267)
> at org.apache.hudi.index.HoodieIndexUtils.mergeIncomingWithExistingRecord(HoodieIndexUtils.java:308)
> at org.apache.hudi.index.HoodieIndexUtils.lambda$mergeForPartitionUpdatesIfNeeded$8fea8a5b$1(HoodieIndexUtils.java:361)
> at org.apache.hudi.data.HoodieJavaRDD.lambda$flatMap$a6598fcb$1(HoodieJavaRDD.java:137)
> at org.apache.spark.api.java.JavaRDDLike.$anonfun$flatMap$1(JavaRDDLike.scala:125)
> at scala.collection.Iterator$$anon$11.nextCur(Iterator.scala:486)
> at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:492)
> at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:223)
> at org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:352)
> at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1552)
> at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1462)
> at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1526)
> at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1349)
> at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:375)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:326)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:328)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:328)
> at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:101)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
> at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
> at org.apache.spark.scheduler.Task.run(Task.scala:139)
> at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1529)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:750)
> ```
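> 
> The trace points at the global-index merge path for partition-path updates: HoodieIndexUtils.mergeForPartitionUpdatesIfNeeded hands the incoming record to HoodieSparkRecord.getValue, which tries to read _hoodie_record_key from a row whose schema carries only the data columns. A minimal sketch of the failing lookup, assuming it reduces to resolving the meta column against the incoming batch schema (illustrative only, not Hudi's actual code):
> ```
> import org.apache.spark.sql.types._
> 
> // Schema of the incoming batch exactly as printed in the exception:
> // the four data columns, none of the Hudi meta columns.
> val incomingSchema = StructType(Seq(
>   StructField("c1", IntegerType, nullable = true),
>   StructField("c3", StringType, nullable = true),
>   StructField("ts", LongType, nullable = true),
>   StructField("c2", IntegerType, nullable = true)))
> 
> // StructType.fieldIndex throws IllegalArgumentException for an absent field;
> // the Hudi write path surfaces the missing field as the HoodieException above.
> incomingSchema.fieldIndex("_hoodie_record_key")
> ```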
>
>
> Another example: the same error occurs with an UPDATE statement.
> ```
> test("Test Type Casting with Global Index for Primary Key and Partition Key
> Updates") {
> Seq("cow", "mor").foreach { tableType =>
> withRecordType()(withTempDir { tmp =>
> withSQLConf("hoodie.index.type" -> "GLOBAL_SIMPLE",
> "hoodie.simple.index.update.partition.path" -> "true",
> "hoodie.spark.sql.optimized.writes.enable" -> "false") {
> val tableName = generateTableName
> // Create table with both primary key and partition key
> spark.sql(
> s"""
> |create table $tableName (
> | c1 int,
> | c2 int,
> | c3 string,
> | ts long
> |) using hudi
> |partitioned by (c2)
> |location '${tmp.getCanonicalPath}/$tableName'
> |tblproperties (
> | type = '$tableType',
> | primaryKey = 'c1',
> | preCombineField = 'ts'
> |)
> """.stripMargin)
> // Test Case 1: Initial insert with double values
> spark.sql(
> s"""
> |insert into $tableName
> |select
> | cast(1.0 as double) as c1,
> | cast(1.0 as double) as c2,
> | 'a' as c3,
> | 1000 as ts
> """.stripMargin)
> // Verify initial insert
> checkAnswer(
> s"select c1, c2, c3 from $tableName")(
> Seq(1, 1, "a")
> )
> // Test Case 2: Update partition key (c2)
> spark.sql(
> s"""
> |update $tableName
> |set c2 = cast(2.0 as double)
> |where c3 = 'a'
> """.stripMargin)
> // Verify partition key update
> checkAnswer(
> s"select c1, c2, c3 from $tableName")(
> Seq(1, 2, "a")
> )
> // // Test Case 3: Insert overwrite with double values
> // spark.sql(
> // s"""
> // |insert overwrite table $tableName
> // |select
> // | cast(3.0 as double) as c1,
> // | cast(3.0 as double) as c2,
> // | 'a' as c3,
> // | 1003 as ts
> // """.stripMargin)
> //
> // // Verify final state after insert overwrite
> // checkAnswer(
> // s"select c1, c2, c3 from $tableName")(
> // Seq(3, 3, "a")
> // )
> //
> // // Additional verification: check complete table state with sorting
> // checkAnswer(
> // s"""
> // |select c1, c2, c3 from $tableName
> // |order by c1, c2
> // """)(
> // Seq(3, 3, "a")
> // )
> //
> // // Verify record count
> // val count = spark.sql(s"select count(*) from $tableName").collect()(0).getLong(0)
> // assert(count == 1L,
> // s"$tableType table: Expected 1 record but found $count records")
> }
> })
> }
> }
> ```
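> 
> Both repros share the same trigger: a global index (GLOBAL_SIMPLE) with hoodie.simple.index.update.partition.path=true, plus a write that moves an existing record key to a new partition, which makes the index merge the incoming record with the stored one. A sketch of the apparent precondition violation, assuming the merge expects the standard Hudi meta columns on the records it receives (the column names below are the well-known Hudi meta fields; the check itself is illustrative):
> ```
> import org.apache.spark.sql.types._
> 
> // The five standard meta columns that materialized Hudi rows carry.
> val hoodieMetaColumns = Seq("_hoodie_commit_time", "_hoodie_commit_seqno",
>   "_hoodie_record_key", "_hoodie_partition_path", "_hoodie_file_name")
> 
> // Data-only schema of the incoming batch, as printed in both stack traces.
> val incomingSchema = StructType(Seq(
>   StructField("c1", IntegerType, true), StructField("c3", StringType, true),
>   StructField("ts", LongType, true), StructField("c2", IntegerType, true)))
> 
> // None of the meta columns resolve against the incoming schema, matching
> // the "Field at _hoodie_record_key is not present" error in both repros.
> val missing = hoodieMetaColumns.filterNot(c => incomingSchema.fieldNames.contains(c))
> assert(missing == hoodieMetaColumns)
> ```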
>
--
This message was sent by Atlassian Jira
(v8.20.10#820010)