[ https://issues.apache.org/jira/browse/HUDI-6984?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Lin Liu closed HUDI-6984.
-------------------------
Resolution: Fixed
> query64 failed.
> ---------------
>
> Key: HUDI-6984
> URL: https://issues.apache.org/jira/browse/HUDI-6984
> Project: Apache Hudi
> Issue Type: Sub-task
> Reporter: Lin Liu
> Assignee: Lin Liu
> Priority: Major
>
> {code:java}
> [hadoop@ip-10-0-112-196 lst-bench]$ 2023-10-25T21:52:19,829 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing statement: query64.sql_0
> 2023-10-25T21:52:19,829 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing file: query64.sql
> 2023-10-25T21:52:19,830 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing task: single_user_0
> 2023-10-25T21:52:19,834 ERROR [pool-2-thread-1] common.LSTBenchmarkExecutor: Exception executing session: 0
> 2023-10-25T21:52:19,834 WARN [main] common.LSTBenchmarkExecutor: Thread did not finish correctly
> java.util.concurrent.ExecutionException:
> java.sql.SQLException: org.apache.hive.service.cli.HiveSQLException: Error
> running query: org.apache.spark.SparkException: Job aborted due to stage
> failure: Task 851 in stage 3093.0 failed 4 times, most recent failure: Lost
> task 851.3 in stage 3093.0 (TID 666996)
> (ip-10-0-103-0.us-west-2.compute.internal executor 7): ExecutorLostFailure
> (executor 7 exited caused by one of the running tasks) Reason: Executor Process Lost
> Driver stacktrace:
> at org.apache.spark.sql.hive.thriftserver.HiveThriftServerErrors$.runningQueryError(HiveThriftServerErrors.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:325)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$2(SparkExecuteStatementOperation.scala:230)
> at
> scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:79)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:63)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:230)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:225)
> at java.base/java.security.AccessController.doPrivileged(Native Method) at
> java.base/javax.security.auth.Subject.doAs(Subject.java:423) at
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1878)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:239)
> at
> java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
> at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> at java.base/java.lang.Thread.run(Thread.java:829)
> Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 851
> in stage 3093.0 failed 4 times, most recent failure: Lost task 851.3 in stage
> 3093.0 (TID 666996) (ip-10-0-103-0.us-west-2.compute.internal executor 7):
> ExecutorLostFailure (executor 7 exited caused by one of the running tasks)
> Reason: Executor Process Lost
> Driver stacktrace:
> at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2863)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2799)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2798)
> at
> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) at
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2798)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1239)
> at scala.Option.foreach(Option.scala:407) at
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3051)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2993)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2982)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49) at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.checkNoFailures(AdaptiveExecutor.scala:154)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.doRun(AdaptiveExecutor.scala:88)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.tryRunningAndGetFuture(AdaptiveExecutor.scala:66)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.execute(AdaptiveExecutor.scala:57)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.$anonfun$getFinalPhysicalPlan$1(AdaptiveSparkPlanExec.scala:249)
> at
> org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.getFinalPhysicalPlan(AdaptiveSparkPlanExec.scala:248)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.withFinalPlanUpdate(AdaptiveSparkPlanExec.scala:521)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.executeCollect(AdaptiveSparkPlanExec.scala:483)
> at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3932)
> at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:3161) at
> org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:3922)
> at
> org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:554)
> at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3920)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3920) at
> org.apache.spark.sql.Dataset.collect(Dataset.scala:3161) at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:300)
> ... 16 more
> at java.util.concurrent.FutureTask.report(FutureTask.java:122) ~[?:?]
> at java.util.concurrent.FutureTask.get(FutureTask.java:191) ~[?:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor.checkResults(LSTBenchmarkExecutor.java:165)
> [lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor.execute(LSTBenchmarkExecutor.java:121)
> [lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.Driver.main(Driver.java:147)
> [lst-bench-0.1-SNAPSHOT.jar:?]
> Caused by: java.sql.SQLException: org.apache.hive.service.cli.HiveSQLException: Error running query:
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 851
> in stage 3093.0 failed 4 times, most recent failure: Lost task 851.3 in stage
> 3093.0 (TID 666996) (ip-10-0-103-0.us-west-2.compute.internal executor 7):
> ExecutorLostFailure (executor 7 exited caused by one of the running tasks)
> Reason: Executor Process Lost
> Driver stacktrace:
> at org.apache.spark.sql.hive.thriftserver.HiveThriftServerErrors$.runningQueryError(HiveThriftServerErrors.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:325)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$2(SparkExecuteStatementOperation.scala:230)
> at
> scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:79)
> at
> org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:63)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:43)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:230)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:225)
> at java.base/java.security.AccessController.doPrivileged(Native Method) at
> java.base/javax.security.auth.Subject.doAs(Subject.java:423) at
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1878)
> at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:239)
> at
> java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
> at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> at
> java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> at java.base/java.lang.Thread.run(Thread.java:829)
> Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 851
> in stage 3093.0 failed 4 times, most recent failure: Lost task 851.3 in stage
> 3093.0 (TID 666996) (ip-10-0-103-0.us-west-2.compute.internal executor 7):
> ExecutorLostFailure (executor 7 exited caused by one of the running tasks)
> Reason: Executor Process Lost
> Driver stacktrace:
> at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2863)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2799)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2798)
> at
> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) at
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2798)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1239)
> at scala.Option.foreach(Option.scala:407) at
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1239)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3051)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2993)
> at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2982)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49) at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.checkNoFailures(AdaptiveExecutor.scala:154)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.doRun(AdaptiveExecutor.scala:88)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.tryRunningAndGetFuture(AdaptiveExecutor.scala:66)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveExecutor.execute(AdaptiveExecutor.scala:57)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.$anonfun$getFinalPhysicalPlan$1(AdaptiveSparkPlanExec.scala:249)
> at
> org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.getFinalPhysicalPlan(AdaptiveSparkPlanExec.scala:248)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.withFinalPlanUpdate(AdaptiveSparkPlanExec.scala:521)
> at
> org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec.executeCollect(AdaptiveSparkPlanExec.scala:483)
> at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3932)
> at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:3161) at
> org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:3922)
> at
> org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:554)
> at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3920)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.executeQuery$1(SQLExecution.scala:114)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$7(SQLExecution.scala:139)
> at
> org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:107)
> at
> org.apache.spark.sql.execution.SQLExecution$.withTracker(SQLExecution.scala:224)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:139)
> at
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:245)
> at
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:138)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779) at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
> at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3920) at
> org.apache.spark.sql.Dataset.collect(Dataset.scala:3161) at
> org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:300)
> ... 16 more
> at
> org.apache.hive.jdbc.HiveStatement.waitForOperationToComplete(HiveStatement.java:401)
> ~[hive-jdbc-3.1.3.jar:3.1.3] at
> org.apache.hive.jdbc.HiveStatement.execute(HiveStatement.java:266)
> ~[hive-jdbc-3.1.3.jar:3.1.3] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.executeTask(LSTBenchmarkExecutor.java:274)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.call(LSTBenchmarkExecutor.java:248)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> com.microsoft.lst_bench.common.LSTBenchmarkExecutor$Worker.call(LSTBenchmarkExecutor.java:222)
> ~[lst-bench-0.1-SNAPSHOT.jar:?] at
> java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
> ~[?:?] at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
> ~[?:?] at java.lang.Thread.run(Thread.java:829) ~[?:?]
> {code}
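> For context, the failing path in the trace runs from LSTBenchmarkExecutor$Worker.executeTask
> through the Hive JDBC driver (HiveStatement.execute) to the Spark Thrift Server, where the
> query runs as a Spark job that aborts with ExecutorLostFailure. The sketch below only
> illustrates that execution path; the JDBC URL, credentials, and query file location are
> assumptions, not the actual lst-bench configuration.
> {code:java}
> import java.nio.file.Files;
> import java.nio.file.Path;
> import java.sql.Connection;
> import java.sql.DriverManager;
> import java.sql.SQLException;
> import java.sql.Statement;
>
> public class Query64Sketch {
>   public static void main(String[] args) throws Exception {
>     // Load the Hive JDBC driver seen in the trace (hive-jdbc-3.1.3).
>     Class.forName("org.apache.hive.jdbc.HiveDriver");
>
>     // "query64.sql_0" in the log is the first statement of query64.sql;
>     // here the whole file is read as a single statement for simplicity.
>     // The file path is a placeholder.
>     String sql = Files.readString(Path.of("query64.sql"));
>
>     // Endpoint and credentials are placeholders, not the benchmark's config.
>     try (Connection conn = DriverManager.getConnection(
>              "jdbc:hive2://localhost:10000/default", "hadoop", "");
>          Statement stmt = conn.createStatement()) {
>       stmt.execute(sql);
>     } catch (SQLException e) {
>       // A Spark-side job abort surfaces here as a HiveSQLException wrapped in
>       // SQLException, matching HiveStatement.execute /
>       // waitForOperationToComplete in the trace above.
>       e.printStackTrace();
>     }
>   }
> }
> {code}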