[
https://issues.apache.org/jira/browse/HUDI-6995?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
Ethan Guo updated HUDI-6995:
------------------------------
Fix Version/s: 1.0.2
> Key generation failure
> ----------------------
>
> Key: HUDI-6995
> URL: https://issues.apache.org/jira/browse/HUDI-6995
> Project: Apache Hudi
> Issue Type: Sub-task
> Reporter: Lin Liu
> Priority: Major
> Fix For: 1.0.2
>
> Running LST-Bench against the Spark Thrift Server with Hudi, an INSERT into a non-partitioned MOR table fails key generation with {{HoodieKeyException: recordKey value: "null" for field: "cc_call_center_sk" cannot be null or empty}}, and a later DELETE ({{DF_CS-mixed.sql}} in the {{data_maintenance_1}} phase) then fails with {{HoodieIOException: Latest commit does not have any schema in commit metadata}}. A repro sketch and the full log follow.
>
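> A minimal repro sketch (hypothetical; {{call_center_repro}} and its columns are stand-ins modeled on the TPC-DS {{call_center}} schema used by the benchmark):
> {code:scala}
> // Hypothetical repro: non-partitioned Hudi MOR table keyed on cc_call_center_sk,
> // matching the key generator classes in the stack trace below.
> spark.sql(
>   """CREATE TABLE call_center_repro (
>     |  cc_call_center_sk BIGINT,
>     |  cc_name STRING
>     |) USING hudi
>     |TBLPROPERTIES (type = 'mor', primaryKey = 'cc_call_center_sk')""".stripMargin)
>
> // Inserting a NULL record key should fail in KeyGenUtils.getRecordKey via
> // NonpartitionedKeyGenerator with the same HoodieKeyException as below.
> spark.sql("INSERT INTO call_center_repro SELECT CAST(NULL AS BIGINT), 'broken row'")
> {code}
> Full log of the run: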
> {code:java}
> Caused by: org.apache.spark.SparkException: Job aborted due to stage failure:
> Task 0 in stage 435.0 failed 4 times, most recent failure: Lost task 0.3 in
> stage 435.0 (TID 36957) (ip-10-0-109-9.us-east-2.compute.internal executor
> 3): org.apache.hudi.exception.HoodieKeyException: recordKey value: "null" for
> field: "cc_call_center_sk" cannot be null or empty. at
> org.apache.hudi.keygen.KeyGenUtils.getRecordKey(KeyGenUtils.java:195)
> at
> org.apache.hudi.keygen.NonpartitionedAvroKeyGenerator.getRecordKey(NonpartitionedAvroKeyGenerator.java:56)
> at
> org.apache.hudi.keygen.NonpartitionedKeyGenerator.getRecordKey(NonpartitionedKeyGenerator.java:54)
> at
> org.apache.hudi.keygen.BaseKeyGenerator.getKey(BaseKeyGenerator.java:70)
> at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.$anonfun$getRecordKey$1(SqlKeyGenerator.scala:79)
> at scala.Option.map(Option.scala:230) at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.getRecordKey(SqlKeyGenerator.scala:79)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.getHoodieKeyAndMaybeLocationFromAvroRecord(HoodieCreateRecordUtils.scala:202)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.$anonfun$createHoodieRecordRdd$5(HoodieCreateRecordUtils.scala:129)
> at scala.collection.Iterator$$anon$10.next(Iterator.scala:461) at
> org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:224)
> at
> org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:352)
> at
> org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1535)
> at
> org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1445)
> at
> org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1509)
> at
> org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1332)
> at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:376) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:327) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> at
> org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
> at org.apache.spark.scheduler.Task.run(Task.scala:138) at
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:548)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1516)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:551)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:750)
> Driver stacktrace: at
> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2863)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2799)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2798)
> at
> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) at
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2798)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1239)
> at scala.Option.foreach(Option.scala:407) at
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3051)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2993)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2982)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49) at
> org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:1009)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2229) at
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2250) at
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2269) at
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2294) at
> org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1021) at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:406) at
> org.apache.spark.rdd.RDD.collect(RDD.scala:1020) at
> org.apache.spark.rdd.PairRDDFunctions.$anonfun$countByKey$1(PairRDDFunctions.scala:367)
> at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:406) at
> org.apache.spark.rdd.PairRDDFunctions.countByKey(PairRDDFunctions.scala:367)
> at org.apache.spark.api.java.JavaPairRDD.countByKey(JavaPairRDD.scala:314)
> at
> org.apache.hudi.data.HoodieJavaPairRDD.countByKey(HoodieJavaPairRDD.java:105)
> at
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.buildProfile(BaseSparkCommitActionExecutor.java:196)
> at
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:170)
> at
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:82)
> at
> org.apache.hudi.table.action.commit.BaseWriteHelper.write(BaseWriteHelper.java:63)
> ... 64 more
> Caused by: org.apache.hudi.exception.HoodieKeyException:
> recordKey value: "null" for field: "cc_call_center_sk" cannot be null or
> empty. at
> org.apache.hudi.keygen.KeyGenUtils.getRecordKey(KeyGenUtils.java:195)
> at
> org.apache.hudi.keygen.NonpartitionedAvroKeyGenerator.getRecordKey(NonpartitionedAvroKeyGenerator.java:56)
> at
> org.apache.hudi.keygen.NonpartitionedKeyGenerator.getRecordKey(NonpartitionedKeyGenerator.java:54)
> at
> org.apache.hudi.keygen.BaseKeyGenerator.getKey(BaseKeyGenerator.java:70)
> at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.$anonfun$getRecordKey$1(SqlKeyGenerator.scala:79)
> at scala.Option.map(Option.scala:230) at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.getRecordKey(SqlKeyGenerator.scala:79)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.getHoodieKeyAndMaybeLocationFromAvroRecord(HoodieCreateRecordUtils.scala:202)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.$anonfun$createHoodieRecordRdd$5(HoodieCreateRecordUtils.scala:129)
> at scala.collection.Iterator$$anon$10.next(Iterator.scala:461) at
> org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:224)
> at
> org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:352)
> at
> org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1535)
> at
> org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1445)
> at
> org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1509)
> at
> org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1332)
> at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:376) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:327) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> at
> org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
> at org.apache.spark.scheduler.Task.run(Task.scala:138) at
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:548)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1516)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:551)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:750)
> at java.util.concurrent.FutureTask.report(FutureTask.java:122) ~[?:?]
> at java.util.concurrent.FutureTask.get(FutureTask.java:191) ~[?:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor.checkResults(LSTBenchmarkExecutor.java:165)
> [lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor.execute(LSTBenchmarkExecutor.java:121)
> [lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.Driver.main(Driver.java:147)
> [lst-bench-0.1-SNAPSHOT.jar:?]
> Caused by: java.sql.SQLException:
> org.apache.hive.service.cli.HiveSQLException: Error running query:
> org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit
> time 20231026200406206 at
> org.apache.spark.sql.hive.thriftserver.HiveThriftServerErrors$.runningQueryError(HiveThriftServerErrors.scala:44)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:325)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$2(SparkExecuteStatementOperation.scala:230)
> at
> scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:79)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:63)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:230)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:225)
> at java.base/java.security.AccessController.doPrivileged(Native Method) at
> java.base/javax.security.auth.Subject.doAs(Subject.java:423) at
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1878)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:239)
> at
> java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
> at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> at java.base/java.lang.Thread.run(Thread.java:829)
> Caused by:
> org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit
> time 20231026200406206 at
> org.apache.hudi.table.action.commit.BaseWriteHelper.write(BaseWriteHelper.java:70)
> at
> org.apache.hudi.table.action.deltacommit.SparkInsertDeltaCommitActionExecutor.execute(SparkInsertDeltaCommitActionExecutor.java:45)
> at
> org.apache.hudi.table.HoodieSparkMergeOnReadTable.insert(HoodieSparkMergeOnReadTable.java:103)
> at
> org.apache.hudi.table.HoodieSparkMergeOnReadTable.insert(HoodieSparkMergeOnReadTable.java:88)
> at
> org.apache.hudi.client.SparkRDDWriteClient.insert(SparkRDDWriteClient.java:167)
> at
> org.apache.hudi.DataSourceUtils.doWriteOperation(DataSourceUtils.java:218)
> at
> org.apache.hudi.HoodieSparkSqlWriterInternal.writeInternal(HoodieSparkSqlWriter.scala:486)
> at
> org.apache.hudi.HoodieSparkSqlWriterInternal.write(HoodieSparkSqlWriter.scala:176)
> at
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:121)
> at
> org.apache.spark.sql.hudi.command.InsertIntoHoodieTableCommand$.run(InsertIntoHoodieTableCommand.scala:108)
> at
> org.apache.spark.sql.hudi.command.InsertIntoHoodieTableCommand.run(InsertIntoHoodieTableCommand.scala:61)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:75)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:73)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:104)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:101)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:97)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:179)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:602)
> at
> org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:97)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:82)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:222) at
> org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:102) at
> org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:99) at
> org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:622)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.SparkSession.sql(SparkSession.scala:617) at
> org.apache.spark.sql.SQLContext.sql(SQLContext.scala:651) at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:291)
> ... 16 more
> Caused by: org.apache.spark.SparkException: Job aborted due
> to stage failure: Task 0 in stage 435.0 failed 4 times, most recent failure:
> Lost task 0.3 in stage 435.0 (TID 36957)
> (ip-10-0-109-9.us-east-2.compute.internal executor 3):
> org.apache.hudi.exception.HoodieKeyException: recordKey value: "null" for
> field: "cc_call_center_sk" cannot be null or empty. at
> org.apache.hudi.keygen.KeyGenUtils.getRecordKey(KeyGenUtils.java:195)
> at
> org.apache.hudi.keygen.NonpartitionedAvroKeyGenerator.getRecordKey(NonpartitionedAvroKeyGenerator.java:56)
> at
> org.apache.hudi.keygen.NonpartitionedKeyGenerator.getRecordKey(NonpartitionedKeyGenerator.java:54)
> at
> org.apache.hudi.keygen.BaseKeyGenerator.getKey(BaseKeyGenerator.java:70)
> at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.$anonfun$getRecordKey$1(SqlKeyGenerator.scala:79)
> at scala.Option.map(Option.scala:230) at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.getRecordKey(SqlKeyGenerator.scala:79)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.getHoodieKeyAndMaybeLocationFromAvroRecord(HoodieCreateRecordUtils.scala:202)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.$anonfun$createHoodieRecordRdd$5(HoodieCreateRecordUtils.scala:129)
> at scala.collection.Iterator$$anon$10.next(Iterator.scala:461) at
> org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:224)
> at
> org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:352)
> at
> org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1535)
> at
> org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1445)
> at
> org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1509)
> at
> org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1332)
> at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:376) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:327) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> at
> org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
> at org.apache.spark.scheduler.Task.run(Task.scala:138) at
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:548)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1516)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:551)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:750)
> Driver stacktrace: at
> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2863)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2799)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2798)
> at
> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) at
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2798)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1239)
> at scala.Option.foreach(Option.scala:407) at
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3051)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2993)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2982)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49) at
> org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:1009)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2229) at
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2250) at
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2269) at
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2294) at
> org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1021) at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:406) at
> org.apache.spark.rdd.RDD.collect(RDD.scala:1020) at
> org.apache.spark.rdd.PairRDDFunctions.$anonfun$countByKey$1(PairRDDFunctions.scala:367)
> at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:406) at
> org.apache.spark.rdd.PairRDDFunctions.countByKey(PairRDDFunctions.scala:367)
> at org.apache.spark.api.java.JavaPairRDD.countByKey(JavaPairRDD.scala:314)
> at
> org.apache.hudi.data.HoodieJavaPairRDD.countByKey(HoodieJavaPairRDD.java:105)
> at
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.buildProfile(BaseSparkCommitActionExecutor.java:196)
> at
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:170)
> at
> org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:82)
> at
> org.apache.hudi.table.action.commit.BaseWriteHelper.write(BaseWriteHelper.java:63)
> ... 64 more
> Caused by: org.apache.hudi.exception.HoodieKeyException:
> recordKey value: "null" for field: "cc_call_center_sk" cannot be null or
> empty. at
> org.apache.hudi.keygen.KeyGenUtils.getRecordKey(KeyGenUtils.java:195)
> at
> org.apache.hudi.keygen.NonpartitionedAvroKeyGenerator.getRecordKey(NonpartitionedAvroKeyGenerator.java:56)
> at
> org.apache.hudi.keygen.NonpartitionedKeyGenerator.getRecordKey(NonpartitionedKeyGenerator.java:54)
> at
> org.apache.hudi.keygen.BaseKeyGenerator.getKey(BaseKeyGenerator.java:70)
> at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.$anonfun$getRecordKey$1(SqlKeyGenerator.scala:79)
> at scala.Option.map(Option.scala:230) at
> org.apache.spark.sql.hudi.command.SqlKeyGenerator.getRecordKey(SqlKeyGenerator.scala:79)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.getHoodieKeyAndMaybeLocationFromAvroRecord(HoodieCreateRecordUtils.scala:202)
> at
> org.apache.hudi.HoodieCreateRecordUtils$.$anonfun$createHoodieRecordRdd$5(HoodieCreateRecordUtils.scala:129)
> at scala.collection.Iterator$$anon$10.next(Iterator.scala:461) at
> org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:224)
> at
> org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:352)
> at
> org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1535)
> at
> org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1445)
> at
> org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1509)
> at
> org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1332)
> at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:376) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:327) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365) at
> org.apache.spark.rdd.RDD.iterator(RDD.scala:329) at
> org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> at
> org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
> at org.apache.spark.scheduler.Task.run(Task.scala:138) at
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:548)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1516)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:551)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:750)
> at
> org.apache.hive.jdbc.HiveStatement.waitForOperationToComplete(HiveStatement.java:401)
> ~[hive-jdbc-3.1.3.jar:3.1.3] at
> org.apache.hive.jdbc.HiveStatement.execute(HiveStatement.java:266)
> ~[hive-jdbc-3.1.3.jar:3.1.3] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.executeTask(LSTBenchmarkExecutor.java:274)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.call(LSTBenchmarkExecutor.java:248)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.call(LSTBenchmarkExecutor.java:222)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> ~[?:?] at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> ~[?:?] at java.lang.Thread.run(Thread.java:829) ~[?:?]
> 2023-10-26T20:04:18,360 INFO [main] telemetry.JDBCTelemetryRegistry: Flushing events to database...
> 2023-10-26T20:04:25,022 INFO [main] telemetry.JDBCTelemetryRegistry: Events flushed to database.
> 2023-10-26T20:04:25,028 INFO [main] common.LSTBenchmarkExecutor: Phase build finished in 62 seconds.
> 2023-10-26T20:04:25,028 INFO [main] common.LSTBenchmarkExecutor: Running single_user phase...
> 2023-10-26T20:05:11,658 INFO [main] telemetry.JDBCTelemetryRegistry: Flushing events to database...
> 2023-10-26T20:05:19,703 INFO [main] telemetry.JDBCTelemetryRegistry: Events flushed to database.
> 2023-10-26T20:05:19,711 INFO [main] common.LSTBenchmarkExecutor: Phase single_user finished in 46 seconds.
> 2023-10-26T20:05:19,711 INFO [main] common.LSTBenchmarkExecutor: Running throughput_1 phase...
> 2023-10-26T20:06:05,667 INFO [main] telemetry.JDBCTelemetryRegistry: Flushing events to database...
> 2023-10-26T20:06:13,344 INFO [main] telemetry.JDBCTelemetryRegistry: Events flushed to database.
> 2023-10-26T20:06:13,351 INFO [main] common.LSTBenchmarkExecutor: Phase throughput_1 finished in 45 seconds.
> 2023-10-26T20:06:13,352 INFO [main] common.LSTBenchmarkExecutor: Running data_maintenance_1 phase...
> 2023-10-26T20:07:12,354 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing statement: DF_CS-mixed.sql_3
> 2023-10-26T20:07:12,354 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing file: DF_CS-mixed.sql
> 2023-10-26T20:07:12,354 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing task: data_maintenance_hudi_0
> 2023-10-26T20:07:12,357 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing session: 0
> 2023-10-26T20:07:12,358 WARN [main] common.LSTBenchmarkExecutor: Thread did not finish correctly
> java.util.concurrent.ExecutionException: java.sql.SQLException: org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.hudi.exception.HoodieIOException: Latest commit does not have any schema in commit metadata
> at
> org.apache.spark.sql.hive.thriftserver.HiveThriftServerErrors$.runningQueryError(HiveThriftServerErrors.scala:44)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:325)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$2(SparkExecuteStatementOperation.scala:230)
> at
> scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:79)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:63)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:230)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:225)
> at java.base/java.security.AccessController.doPrivileged(Native Method) at
> java.base/javax.security.auth.Subject.doAs(Subject.java:423) at
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1878)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:239)
> at
> java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
> at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> at java.base/java.lang.Thread.run(Thread.java:829)
> Caused by:
> org.apache.hudi.exception.HoodieIOException: Latest commit does not have any
> schema in commit metadata at
> org.apache.hudi.client.BaseHoodieWriteClient.setWriteSchemaForDeletes(BaseHoodieWriteClient.java:1335)
> at
> org.apache.hudi.client.BaseHoodieWriteClient.initTable(BaseHoodieWriteClient.java:1287)
> at
> org.apache.hudi.client.SparkRDDWriteClient.deletePrepped(SparkRDDWriteClient.java:253)
> at
> org.apache.hudi.DataSourceUtils.doDeleteOperation(DataSourceUtils.java:246)
> at
> org.apache.hudi.HoodieSparkSqlWriterInternal.writeInternal(HoodieSparkSqlWriter.scala:380)
> at
> org.apache.hudi.HoodieSparkSqlWriterInternal.write(HoodieSparkSqlWriter.scala:176)
> at
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:121)
> at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:150)
> at
> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:47)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:75)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:73)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:104)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:101)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:97)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:179)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:602)
> at
> org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:97)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:82)
> at
> org.apache.spark.sql.execution.QueryExecution.assertCommandExecuted(QueryExecution.scala:125)
> at
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:860)
> at
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:390)
> at
> org.apache.spark.sql.DataFrameWriter.saveInternal(DataFrameWriter.scala:363)
> at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:247) at
> org.apache.spark.sql.hudi.command.DeleteHoodieTableCommand.run(DeleteHoodieTableCommand.scala:67)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:75)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:73)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:104)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:101)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:97)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:179)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:602)
> at
> org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:97)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:82)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:222) at
> org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:102) at
> org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:99) at
> org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:622)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.SparkSession.sql(SparkSession.scala:617) at
> org.apache.spark.sql.SQLContext.sql(SQLContext.scala:651) at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:291)
> ... 16 more
> at java.util.concurrent.FutureTask.report(FutureTask.java:122) ~[?:?]
> at java.util.concurrent.FutureTask.get(FutureTask.java:191) ~[?:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor.checkResults(LSTBenchmarkExecutor.java:165)
> [lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor.execute(LSTBenchmarkExecutor.java:121)
> [lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.Driver.main(Driver.java:147)
> [lst-bench-0.1-SNAPSHOT.jar:?]
> Caused by: java.sql.SQLException:
> org.apache.hive.service.cli.HiveSQLException: Error running query:
> org.apache.hudi.exception.HoodieIOException: Latest commit does not have any
> schema in commit metadata at
> org.apache.spark.sql.hive.thriftserver.HiveThriftServerErrors$.runningQueryError(HiveThriftServerErrors.scala:44)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:325)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$2(SparkExecuteStatementOperation.scala:230)
> at
> scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:79)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:63)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:230)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:225)
> at java.base/java.security.AccessController.doPrivileged(Native Method) at
> java.base/javax.security.auth.Subject.doAs(Subject.java:423) at
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1878)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:239)
> at
> java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
> at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> at java.base/java.lang.Thread.run(Thread.java:829)
> Caused by:
> org.apache.hudi.exception.HoodieIOException: Latest commit does not have any
> schema in commit metadata at
> org.apache.hudi.client.BaseHoodieWriteClient.setWriteSchemaForDeletes(BaseHoodieWriteClient.java:1335)
> at
> org.apache.hudi.client.BaseHoodieWriteClient.initTable(BaseHoodieWriteClient.java:1287)
> at
> org.apache.hudi.client.SparkRDDWriteClient.deletePrepped(SparkRDDWriteClient.java:253)
> at
> org.apache.hudi.DataSourceUtils.doDeleteOperation(DataSourceUtils.java:246)
> at
> org.apache.hudi.HoodieSparkSqlWriterInternal.writeInternal(HoodieSparkSqlWriter.scala:380)
> at
> org.apache.hudi.HoodieSparkSqlWriterInternal.write(HoodieSparkSqlWriter.scala:176)
> at
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:121)
> at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:150)
> at
> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:47)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:75)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:73)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:104)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:101)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:97)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:179)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:602)
> at
> org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:97)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:82)
> at
> org.apache.spark.sql.execution.QueryExecution.assertCommandExecuted(QueryExecution.scala:125)
> at
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:860)
> at
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:390)
> at
> org.apache.spark.sql.DataFrameWriter.saveInternal(DataFrameWriter.scala:363)
> at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:247) at
> org.apache.spark.sql.hudi.command.DeleteHoodieTableCommand.run(DeleteHoodieTableCommand.scala:67)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:75)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:73)
> at
> org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:104)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:101)
> at
> org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:97)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:179)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:626)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
> at
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
> at
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:602)
> at
> org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:97)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:84)
> at
> org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:82)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:222) at
> org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:102) at
> org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:99) at
> org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:622)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.SparkSession.sql(SparkSession.scala:617) at
> org.apache.spark.sql.SQLContext.sql(SQLContext.scala:651) at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:291)
> ... 16 more
> at
> org.apache.hive.jdbc.HiveStatement.waitForOperationToComplete(HiveStatement.java:401)
> ~[hive-jdbc-3.1.3.jar:3.1.3] at
> org.apache.hive.jdbc.HiveStatement.execute(HiveStatement.java:266)
> ~[hive-jdbc-3.1.3.jar:3.1.3] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.executeTask(LSTBenchmarkExecutor.java:274)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.call(LSTBenchmarkExecutor.java:248)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.call(LSTBenchmarkExecutor.java:222)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> ~[?:?] at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> ~[?:?] at java.lang.Thread.run(Thread.java:829) ~[?:?]
> {code}
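> The delete failure in the data maintenance phase looks like a downstream effect of the failed insert: {{BaseHoodieWriteClient.setWriteSchemaForDeletes}} presumably finds no schema because the last commit never completed. A possible mitigation sketch while the root cause is investigated (hypothetical; {{staging_call_center}} is a stand-in source name):
> {code:scala}
> // Hypothetical mitigation: drop rows with NULL record keys before inserting,
> // so key generation never sees a null cc_call_center_sk.
> spark.sql(
>   """INSERT INTO call_center_repro
>     |SELECT * FROM staging_call_center
>     |WHERE cc_call_center_sk IS NOT NULL""".stripMargin)
> {code}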
--
This message was sent by Atlassian Jira
(v8.20.10#820010)