[ 
https://issues.apache.org/jira/browse/HUDI-2943?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17455984#comment-17455984
 ] 

Harsha Teja Kanna edited comment on HUDI-2943 at 12/8/21, 9:10 PM:
-------------------------------------------------------------------

I still see this issue even after retrying the run multiple times.

21/12/08 20:33:41 INFO Client:
client token: N/A
diagnostics: User class threw exception: 
org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit 
time 20211208203215741
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:62)
at 
org.apache.hudi.table.action.commit.SparkUpsertCommitActionExecutor.execute(SparkUpsertCommitActionExecutor.java:46)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:119)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:103)
at 
org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:159)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.writeToSink(DeltaSync.java:501)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.syncOnce(DeltaSync.java:306)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.lambda$sync$2(HoodieDeltaStreamer.java:193)
at org.apache.hudi.common.util.Option.ifPresent(Option.java:96)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.sync(HoodieDeltaStreamer.java:191)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.main(HoodieDeltaStreamer.java:511)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:735)
Caused by: org.apache.hudi.exception.HoodieClusteringUpdateException: Not 
allowed to update the clustering file group 
HoodieFileGroupId\{partitionPath='lastdate=2021/12/07', 
fileId='5bae5668-ad51-4ea5-9e69-f39b4df5834b-0'}. For pending clustering 
operations, we are not going to support update for now.
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.lambda$handleUpdate$0(SparkRejectUpdateStrategy.java:65)
at java.lang.Iterable.forEach(Iterable.java:75)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:59)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:42)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.clusteringHandleUpdate(BaseSparkCommitActionExecutor.java:123)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:165)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:82)
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:55)
... 15 more

ApplicationMaster host: ip-10-0-4-50
ApplicationMaster RPC port: 41499
queue: default
start time: 1638995434548
final status: FAILED
tracking URL: [http://ip-10-0-4-49:20888/proxy/application_1638781912156_2301/]
user: hadoop
21/12/08 20:33:41 ERROR Client: Application diagnostics message: User class 
threw exception: org.apache.hudi.exception.HoodieUpsertException: Failed to 
upsert for commit time 20211208203215741
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:62)
at 
org.apache.hudi.table.action.commit.SparkUpsertCommitActionExecutor.execute(SparkUpsertCommitActionExecutor.java:46)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:119)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:103)
at 
org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:159)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.writeToSink(DeltaSync.java:501)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.syncOnce(DeltaSync.java:306)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.lambda$sync$2(HoodieDeltaStreamer.java:193)
at org.apache.hudi.common.util.Option.ifPresent(Option.java:96)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.sync(HoodieDeltaStreamer.java:191)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.main(HoodieDeltaStreamer.java:511)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:735)
Caused by: org.apache.hudi.exception.HoodieClusteringUpdateException: Not 
allowed to update the clustering file group 
HoodieFileGroupId\{partitionPath='lastdate=2021/12/07', 
fileId='5bae5668-ad51-4ea5-9e69-f39b4df5834b-0'}. For pending clustering 
operations, we are not going to support update for now.
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.lambda$handleUpdate$0(SparkRejectUpdateStrategy.java:65)
at java.lang.Iterable.forEach(Iterable.java:75)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:59)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:42)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.clusteringHandleUpdate(BaseSparkCommitActionExecutor.java:123)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:165)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:82)
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:55)
... 15 more

Exception in thread "main" org.apache.spark.SparkException: Application 
application_1638781912156_2301 finished with failed status
at org.apache.spark.deploy.yarn.Client.run(Client.scala:1253)
at org.apache.spark.deploy.yarn.YarnClusterApplication.start(Client.scala:1645)
at 
org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:959)
at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)
at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)
at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)
at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1047)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1056)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
21/12/08 20:33:41 INFO ShutdownHookManager: Shutdown hook called
21/12/08 20:33:41 INFO ShutdownHookManager: Deleting directory 
/mnt/tmp/spark-6166a02f-8c72-4e3a-91ab-bd15ab383994
21/12/08 20:33:41 INFO ShutdownHookManager: Deleting directory 
/mnt/tmp/spark-73462b7d-85eb-4bf1-8a6e-bc0391559846


!image-2021-12-08-15-10-02-420.png!


was (Author: h7kanna):
I still see this issue even after retrying the run multiple times.



21/12/08 20:33:41 INFO Client:
client token: N/A
diagnostics: User class threw exception: 
org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit 
time 20211208203215741
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:62)
at 
org.apache.hudi.table.action.commit.SparkUpsertCommitActionExecutor.execute(SparkUpsertCommitActionExecutor.java:46)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:119)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:103)
at 
org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:159)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.writeToSink(DeltaSync.java:501)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.syncOnce(DeltaSync.java:306)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.lambda$sync$2(HoodieDeltaStreamer.java:193)
at org.apache.hudi.common.util.Option.ifPresent(Option.java:96)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.sync(HoodieDeltaStreamer.java:191)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.main(HoodieDeltaStreamer.java:511)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:735)
Caused by: org.apache.hudi.exception.HoodieClusteringUpdateException: Not 
allowed to update the clustering file group 
HoodieFileGroupId\{partitionPath='lastdate=2021/12/07', 
fileId='5bae5668-ad51-4ea5-9e69-f39b4df5834b-0'}. For pending clustering 
operations, we are not going to support update for now.
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.lambda$handleUpdate$0(SparkRejectUpdateStrategy.java:65)
at java.lang.Iterable.forEach(Iterable.java:75)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:59)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:42)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.clusteringHandleUpdate(BaseSparkCommitActionExecutor.java:123)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:165)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:82)
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:55)
... 15 more

ApplicationMaster host: ip-10-0-4-50
ApplicationMaster RPC port: 41499
queue: default
start time: 1638995434548
final status: FAILED
tracking URL: http://ip-10-0-4-49:20888/proxy/application_1638781912156_2301/
user: hadoop
21/12/08 20:33:41 ERROR Client: Application diagnostics message: User class 
threw exception: org.apache.hudi.exception.HoodieUpsertException: Failed to 
upsert for commit time 20211208203215741
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:62)
at 
org.apache.hudi.table.action.commit.SparkUpsertCommitActionExecutor.execute(SparkUpsertCommitActionExecutor.java:46)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:119)
at 
org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:103)
at 
org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:159)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.writeToSink(DeltaSync.java:501)
at 
org.apache.hudi.utilities.deltastreamer.DeltaSync.syncOnce(DeltaSync.java:306)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.lambda$sync$2(HoodieDeltaStreamer.java:193)
at org.apache.hudi.common.util.Option.ifPresent(Option.java:96)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.sync(HoodieDeltaStreamer.java:191)
at 
org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.main(HoodieDeltaStreamer.java:511)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:735)
Caused by: org.apache.hudi.exception.HoodieClusteringUpdateException: Not 
allowed to update the clustering file group 
HoodieFileGroupId\{partitionPath='lastdate=2021/12/07', 
fileId='5bae5668-ad51-4ea5-9e69-f39b4df5834b-0'}. For pending clustering 
operations, we are not going to support update for now.
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.lambda$handleUpdate$0(SparkRejectUpdateStrategy.java:65)
at java.lang.Iterable.forEach(Iterable.java:75)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:59)
at 
org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.handleUpdate(SparkRejectUpdateStrategy.java:42)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.clusteringHandleUpdate(BaseSparkCommitActionExecutor.java:123)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:165)
at 
org.apache.hudi.table.action.commit.BaseSparkCommitActionExecutor.execute(BaseSparkCommitActionExecutor.java:82)
at 
org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:55)
... 15 more

Exception in thread "main" org.apache.spark.SparkException: Application 
application_1638781912156_2301 finished with failed status
at org.apache.spark.deploy.yarn.Client.run(Client.scala:1253)
at org.apache.spark.deploy.yarn.YarnClusterApplication.start(Client.scala:1645)
at 
org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:959)
at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)
at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)
at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)
at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1047)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1056)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
21/12/08 20:33:41 INFO ShutdownHookManager: Shutdown hook called
21/12/08 20:33:41 INFO ShutdownHookManager: Deleting directory 
/mnt/tmp/spark-6166a02f-8c72-4e3a-91ab-bd15ab383994
21/12/08 20:33:41 INFO ShutdownHookManager: Deleting directory 
/mnt/tmp/spark-73462b7d-85eb-4bf1-8a6e-bc0391559846

> Deltastreamer fails to continue with pending clustering after restart in 
> 0.10.0
> -------------------------------------------------------------------------------
>
>                 Key: HUDI-2943
>                 URL: https://issues.apache.org/jira/browse/HUDI-2943
>             Project: Apache Hudi
>          Issue Type: Bug
>          Components: DeltaStreamer
>            Reporter: Harsha Teja Kanna
>            Priority: Major
>         Attachments: image-2021-12-08-15-10-02-420.png
>
>
> DeltaStreamer fails to restart with an upsert-failed exception when inline 
> clustering is enabled and a clustering commit from a previous run is still pending.
> Hudi version : 0.10.0
> Spark version : 3.1.2
> EMR : 6.4.0
> diagnostics: User class threw exception: 
> org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit 
> time 20211206081248919
> at 
> org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:62)
> at 
> org.apache.hudi.table.action.commit.SparkUpsertCommitActionExecutor.execute(SparkUpsertCommitActionExecutor.java:46)
> at 
> org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:119)
> at 
> org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:103)
> at 
> org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:159)
> at 
> org.apache.hudi.utilities.deltastreamer.DeltaSync.writeToSink(DeltaSync.java:501)
> at 
> org.apache.hudi.utilities.deltastreamer.DeltaSync.syncOnce(DeltaSync.java:306)
> at 
> org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.lambda$sync$2(HoodieDeltaStreamer.java:193)
> at org.apache.hudi.common.util.Option.ifPresent(Option.java:96)
> at 
> org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.sync(HoodieDeltaStreamer.java:191)
> at 
> org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.main(HoodieDeltaStreamer.java:511)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at 
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at 
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at 
> org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:735)
> Caused by: org.apache.hudi.exception.HoodieClusteringUpdateException: Not 
> allowed to update the clustering file group 
> HoodieFileGroupId\{partitionPath='', 
> fileId='39ca735d-1fc4-40f9-a314-93744642b38c-0'}. For pending clustering 
> operations, we are not going to support update for now.
> at 
> org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy.lambda$handleUpdate$0(SparkRejectUpdateStrategy.java:65)
> Config:
> hoodie.index.type=GLOBAL_SIMPLE
> hoodie.datasource.write.partitionpath.field=
> hoodie.datasource.write.precombine.field=updatedate
> hoodie.datasource.hive_sync.database=datalake
> hoodie.datasource.write.operation=upsert
> hoodie.datasource.hive_sync.table=hudi.prd.surveys
> hoodie.datasource.hive_sync.mode=hms
> hoodie.datasource.hive_sync.enable=false
> hoodie.datasource.hive_sync.partition_extractor_class=org.apache.hudi.hive.MultiPartKeysValueExtractor
> hoodie.datasource.hive_sync.use_jdbc=false
> hoodie.datasource.write.recordkey.field=id
> hoodie.datasource.write.keygenerator.class=org.apache.hudi.keygen.CustomKeyGenerator
> hoodie.datasource.write.hive_style_partitioning=true
> hoodie.finalize.write.parallelism=256
> hoodie.deltastreamer.source.dfs.root=s3://datalake-bucket/raw/parquet/data/surveys/year=2021/month=12/day=06/hour=16
> hoodie.deltastreamer.source.input.selector=org.apache.hudi.utilities.sources.helpers.DFSPathSelector
> hoodie.parquet.max.file.size=134217728
> hoodie.parquet.small.file.limit=67108864
> hoodie.parquet.block.size=134217728
> hoodie.parquet.compression.codec=snappy
> hoodie.file.listing.parallelism=256
> hoodie.upsert.shuffle.parallelism=10
> hoodie.metadata.enable=false
> hoodie.metadata.clean.async=true
> hoodie.clustering.preserve.commit.metadata=true
> hoodie.clustering.inline.max.commits=1
> hoodie.clustering.inline=true
> hoodie.clustering.plan.strategy.target.file.max.bytes=134217728
> hoodie.clustering.plan.strategy.small.file.limit=67108864
> hoodie.clustering.plan.strategy.sort.columns=projectid
> hoodie.clustering.plan.strategy.class=org.apache.hudi.client.clustering.plan.strategy.SparkRecentDaysClusteringPlanStrategy
> hoodie.clean.async=true
> hoodie.clean.automatic=true
> hoodie.cleaner.policy=KEEP_LATEST_COMMITS
> hoodie.cleaner.commits.retained=10
> hoodie.deltastreamer.transformer.sql=SELECT id, sid FROM <SRC> a



--
This message was sent by Atlassian Jira
(v8.20.1#820001)

Reply via email to