[ https://issues.apache.org/jira/browse/HUDI-2904?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Rajesh Mahindra resolved HUDI-2904.
-----------------------------------

> Failed to archive commits due to no such file in metadata
> ---------------------------------------------------------
>
>                 Key: HUDI-2904
>                 URL: https://issues.apache.org/jira/browse/HUDI-2904
>             Project: Apache Hudi
>          Issue Type: Bug
>            Reporter: Ethan Guo
>            Assignee: Rajesh Mahindra
>            Priority: Blocker
>              Labels: pull-request-available
>             Fix For: 0.10.0
>
>
> Hitting the following exception while running DeltaStreamer in continuous mode on a COW table on S3:
> {code:java}
> java.util.concurrent.ExecutionException: org.apache.hudi.exception.HoodieClusteringException: unable to transition clustering inflight to complete: 20211201011347895
>       at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
>       at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1908)
>       at org.apache.hudi.async.HoodieAsyncService.lambda$monitorThreads$1(HoodieAsyncService.java:158)
>       at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>       at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>       at java.lang.Thread.run(Thread.java:748)
> Caused by: org.apache.hudi.exception.HoodieClusteringException: unable to transition clustering inflight to complete: 20211201011347895
>       at org.apache.hudi.client.SparkRDDWriteClient.completeClustering(SparkRDDWriteClient.java:395)
>       at org.apache.hudi.client.SparkRDDWriteClient.completeTableService(SparkRDDWriteClient.java:470)
>       at org.apache.hudi.client.SparkRDDWriteClient.cluster(SparkRDDWriteClient.java:364)
>       at org.apache.hudi.client.HoodieSparkClusteringClient.cluster(HoodieSparkClusteringClient.java:54)
>       at org.apache.hudi.async.AsyncClusteringService.lambda$null$1(AsyncClusteringService.java:79)
>       at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1604)
>       ... 3 more
> Caused by: org.apache.hudi.exception.HoodieCommitException: Failed to archive commits
>       at org.apache.hudi.table.HoodieTimelineArchiveLog.archive(HoodieTimelineArchiveLog.java:334)
>       at org.apache.hudi.table.HoodieTimelineArchiveLog.archiveIfRequired(HoodieTimelineArchiveLog.java:130)
>       at org.apache.hudi.client.AbstractHoodieWriteClient.postCommit(AbstractHoodieWriteClient.java:454)
>       at org.apache.hudi.client.SparkRDDWriteClient.postWrite(SparkRDDWriteClient.java:280)
>       at org.apache.hudi.client.SparkRDDWriteClient.upsertPreppedRecords(SparkRDDWriteClient.java:173)
>       at org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter.commit(SparkHoodieBackedTableMetadataWriter.java:146)
>       at org.apache.hudi.metadata.HoodieBackedTableMetadataWriter.processAndCommit(HoodieBackedTableMetadataWriter.java:590)
>       at org.apache.hudi.metadata.HoodieBackedTableMetadataWriter.update(HoodieBackedTableMetadataWriter.java:602)
>       at org.apache.hudi.client.SparkRDDWriteClient.lambda$writeTableMetadataForTableServices$5(SparkRDDWriteClient.java:420)
>       at org.apache.hudi.common.util.Option.ifPresent(Option.java:96)
>       at org.apache.hudi.client.SparkRDDWriteClient.writeTableMetadataForTableServices(SparkRDDWriteClient.java:419)
>       at org.apache.hudi.client.SparkRDDWriteClient.completeClustering(SparkRDDWriteClient.java:384)
>       ... 8 more
> Caused by: org.apache.hudi.exception.HoodieIOException: Could not read commit details from s3a://hudi-testing/test_hoodie_table_2/.hoodie/metadata/.hoodie/20211201002149590.deltacommit.requested
>       at org.apache.hudi.common.table.timeline.HoodieActiveTimeline.readDataFromPath(HoodieActiveTimeline.java:634)
>       at org.apache.hudi.common.table.timeline.HoodieActiveTimeline.getInstantDetails(HoodieActiveTimeline.java:250)
>       at org.apache.hudi.client.utils.MetadataConversionUtils.createMetaWrapper(MetadataConversionUtils.java:72)
>       at org.apache.hudi.table.HoodieTimelineArchiveLog.convertToAvroRecord(HoodieTimelineArchiveLog.java:358)
>       at org.apache.hudi.table.HoodieTimelineArchiveLog.archive(HoodieTimelineArchiveLog.java:321)
>       ... 19 more
> Caused by: java.io.FileNotFoundException: No such file or directory: s3a://hudi-testing/test_hoodie_table_2/.hoodie/metadata/.hoodie/20211201002149590.deltacommit.requested
>       at org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:3356)
>       at org.apache.hadoop.fs.s3a.S3AFileSystem.innerGetFileStatus(S3AFileSystem.java:3185)
>       at org.apache.hadoop.fs.s3a.S3AFileSystem.extractOrFetchSimpleFileStatus(S3AFileSystem.java:4903)
>       at org.apache.hadoop.fs.s3a.S3AFileSystem.open(S3AFileSystem.java:1200)
>       at org.apache.hadoop.fs.s3a.S3AFileSystem.open(S3AFileSystem.java:1178)
>       at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:976)
>       at org.apache.hudi.common.fs.HoodieWrapperFileSystem.open(HoodieWrapperFileSystem.java:459)
>       at org.apache.hudi.common.table.timeline.HoodieActiveTimeline.readDataFromPath(HoodieActiveTimeline.java:631)
>       ... 23 more
> {code}
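> The innermost cause is the archiver calling HoodieActiveTimeline.getInstantDetails on the metadata table's 20211201002149590.deltacommit.requested instant file after it has already been removed from S3, presumably by a concurrent table service racing with archival. As a minimal illustrative sketch only (not the actual fix; the class and method names below are hypothetical), a guard of this shape would detect the race instead of surfacing a raw FileNotFoundException:
> {code:java}
> // Hypothetical sketch only (not the actual fix): guard a timeline read against
> // an instant file that a concurrent table service has already deleted.
> import java.io.IOException;
>
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.fs.FileSystem;
> import org.apache.hadoop.fs.Path;
>
> public class InstantFileGuard {
>   // On S3A, fs.exists() issues a HEAD request; it returns false once the file is gone.
>   public static boolean instantStillExists(Configuration conf, String instantPath) throws IOException {
>     Path path = new Path(instantPath);
>     FileSystem fs = path.getFileSystem(conf);
>     return fs.exists(path);
>   }
> }
> {code}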
> DeltaStreamer command:
> {code:java}
> spark-3.2.0-bin-hadoop3.2/bin/spark-submit \
>   --conf spark.hadoop.fs.s3a.aws.credentials.provider=com.amazonaws.auth.DefaultAWSCredentialsProviderChain \
>   --packages org.apache.spark:spark-avro_2.12:3.2.0 \
>   --class org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer \
>   hudi/packaging/hudi-utilities-bundle/target/hudi-utilities-bundle_2.12-0.10.0-rc2.jar \
>   --props test.properties \
>   --schemaprovider-class org.apache.hudi.utilities.schema.FilebasedSchemaProvider \
>   --source-class org.apache.hudi.utilities.sources.ParquetDFSSource \
>   --source-ordering-field ts \
>   --source-limit 50485760 \
>   --target-base-path s3a://hudi-testing/test_hoodie_table_2/ \
>   --target-table test_table \
>   --table-type COPY_ON_WRITE \
>   --op BULK_INSERT \
>   --continuous
> {code}
> test.properties:
> {code:java}
> hoodie.upsert.shuffle.parallelism=2
> hoodie.insert.shuffle.parallelism=2
> hoodie.delete.shuffle.parallelism=2
> hoodie.bulkinsert.shuffle.parallelism=2
> hoodie.embed.timeline.server=true
> hoodie.filesystem.view.type=EMBEDDED_KV_STORE
> hoodie.compact.inline=false
> # Key fields, for kafka example
> hoodie.datasource.write.recordkey.field=uuid
> hoodie.datasource.write.partitionpath.field=partitionpath
> # Schema provider props (change to absolute path based on your installation)
> hoodie.deltastreamer.schemaprovider.source.schema.file=/Users/user/repo/schema.avsc
> hoodie.deltastreamer.schemaprovider.target.schema.file=/Users/user/repo/schema.avsc
> # DFS Source
> hoodie.deltastreamer.source.dfs.root=s3a://hudi-testing/test_input_data/
> hoodie.clustering.async.enabled=true
> hoodie.parquet.max.file.size=1048576
> hoodie.parquet.block.size=1048576
> {code}
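> The tiny hoodie.parquet.max.file.size / hoodie.parquet.block.size values above keep files small so that async clustering and archival trigger quickly in testing, which makes the race easy to hit. To confirm which instant files are still physically present on the metadata table's own timeline, here is a minimal diagnostic sketch, assuming Hudi 0.10.x APIs and the table path from this report:
> {code:java}
> // Minimal diagnostic sketch, assuming Hudi 0.10.x APIs: print every instant still
> // present on the metadata table's timeline; the 20211201002149590 deltacommit from
> // the stack trace should be missing when the bug reproduces.
> import org.apache.hadoop.conf.Configuration;
>
> import org.apache.hudi.common.table.HoodieTableMetaClient;
>
> public class MetadataTimelineDump {
>   public static void main(String[] args) {
>     HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder()
>         .setConf(new Configuration())
>         .setBasePath("s3a://hudi-testing/test_hoodie_table_2/.hoodie/metadata")
>         .build();
>     metaClient.getActiveTimeline().getInstants()
>         .forEach(instant -> System.out.println(instant.getTimestamp() + " " + instant.getState()));
>   }
> }
> {code}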
> Full stack trace:
> [https://gist.github.com/yihua/93662b8d094ca2ac31412726a2966817]
>  



--
This message was sent by Atlassian Jira
(v8.20.1#820001)
