[
https://issues.apache.org/jira/browse/HUDI-4771?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
nonggia.liang updated HUDI-4771:
--------------------------------
Description:
{code:java}
2022-03-21 17:19:02,498 ERROR org.apache.hudi.sink.compact.CompactFunction
[] - Executor executes action [Execute compaction for instant
20220320025438250 from task 8] errorjava.lang.ClassCastException:
org.apache.hudi.common.fs.HoodieWrapperFileSystem cannot be cast to
org.apache.hudi.common.fs.HoodieWrapperFileSystem at
org.apache.hudi.io.storage.HoodieParquetWriter.<init>(HoodieParquetWriter.java:63)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.storage.HoodieFileWriterFactory.newParquetFileWriter(HoodieFileWriterFactory.java:80)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.storage.HoodieFileWriterFactory.newParquetFileWriter(HoodieFileWriterFactory.java:67)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.storage.HoodieFileWriterFactory.getFileWriter(HoodieFileWriterFactory.java:53)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.HoodieWriteHandle.createNewFileWriter(HoodieWriteHandle.java:257)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.HoodieMergeHandle.init(HoodieMergeHandle.java:186)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.HoodieMergeHandle.<init>(HoodieMergeHandle.java:136)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.table.HoodieFlinkCopyOnWriteTable.getUpdateHandle(HoodieFlinkCopyOnWriteTable.java:395)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.table.HoodieFlinkCopyOnWriteTable.handleUpdate(HoodieFlinkCopyOnWriteTable.java:358)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.table.action.compact.HoodieCompactor.compact(HoodieCompactor.java:197)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.sink.compact.CompactFunction.doCompaction(CompactFunction.java:104)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.sink.compact.CompactFunction.lambda$processElement$0(CompactFunction.java:92)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.sink.utils.NonThrownExecutor.lambda$execute$0(NonThrownExecutor.java:93)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[?:1.8.0_261] at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[?:1.8.0_261] at java.lang.Thread.run(Thread.java:748) [?:1.8.0_261]
{code}
The stack is from version 0.10.1. The root cause of the exception is:
FileSystem provided by hadoop caches FileSystem instances globally.
In flink taskmanager process, the FileSystem class is loaded by
AppClassLoader,
and HoodieWrapperFileSystem is loaded by ChildFirstClassLoader.
When Flink jobs recover from a failure (which is normal in k8s when the pod is
evicted), the ChildFirstClassLoader will change, but the AppClassLoader stays
unchanged (as do the cached HoodieWrapperFileSystem instances). After
recovery, when the cached instance is returned and cast to a different
class, the exception is raised.
was:
{code:java}
2022-03-21 17:19:02,498 ERROR org.apache.hudi.sink.compact.CompactFunction
[] - Executor executes action [Execute compaction for instant
20220320025438250 from task 8] errorjava.lang.ClassCastException:
org.apache.hudi.common.fs.HoodieWrapperFileSystem cannot be cast to
org.apache.hudi.common.fs.HoodieWrapperFileSystem at
org.apache.hudi.io.storage.HoodieParquetWriter.<init>(HoodieParquetWriter.java:63)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.storage.HoodieFileWriterFactory.newParquetFileWriter(HoodieFileWriterFactory.java:80)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.storage.HoodieFileWriterFactory.newParquetFileWriter(HoodieFileWriterFactory.java:67)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.storage.HoodieFileWriterFactory.getFileWriter(HoodieFileWriterFactory.java:53)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.HoodieWriteHandle.createNewFileWriter(HoodieWriteHandle.java:257)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.HoodieMergeHandle.init(HoodieMergeHandle.java:186)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.io.HoodieMergeHandle.<init>(HoodieMergeHandle.java:136)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.table.HoodieFlinkCopyOnWriteTable.getUpdateHandle(HoodieFlinkCopyOnWriteTable.java:395)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.table.HoodieFlinkCopyOnWriteTable.handleUpdate(HoodieFlinkCopyOnWriteTable.java:358)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.table.action.compact.HoodieCompactor.compact(HoodieCompactor.java:197)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.sink.compact.CompactFunction.doCompaction(CompactFunction.java:104)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.sink.compact.CompactFunction.lambda$processElement$0(CompactFunction.java:92)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
org.apache.hudi.sink.utils.NonThrownExecutor.lambda$execute$0(NonThrownExecutor.java:93)
~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[?:1.8.0_261] at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[?:1.8.0_261] at java.lang.Thread.run(Thread.java:748) [?:1.8.0_261]
{code}
The stack is from version 0.10.1. The root cause of the exception is:
FileSystem provided by hadoop caches FileSystem instances globally.
In flink taskmanager process, the FileSystem class is loaded by
AppClassLoader,
and HoodieWrapperFileSystem is loaded by ChildFirstClassLoader.
When Flink jobs recover from a failure (which is normal in k8s when the pod is
evicted), the ChildFirstClassLoader will change, but the AppClassLoader stays
unchanged (as do the cached HoodieWrapperFileSystem instances). After
recovery, when the cached instance is returned and cast to a different
class, the exception is raised.
> ClassCastException when casting HoodieWrapperFileSystem in flink
> ----------------------------------------------------------------
>
> Key: HUDI-4771
> URL: https://issues.apache.org/jira/browse/HUDI-4771
> Project: Apache Hudi
> Issue Type: Bug
> Components: flink, flink-sql
> Reporter: nonggia.liang
> Priority: Major
>
> {code:java}
> 2022-03-21 17:19:02,498 ERROR org.apache.hudi.sink.compact.CompactFunction
> [] - Executor executes action [Execute compaction for instant
> 20220320025438250 from task 8] errorjava.lang.ClassCastException:
> org.apache.hudi.common.fs.HoodieWrapperFileSystem cannot be cast to
> org.apache.hudi.common.fs.HoodieWrapperFileSystem at
> org.apache.hudi.io.storage.HoodieParquetWriter.<init>(HoodieParquetWriter.java:63)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.io.storage.HoodieFileWriterFactory.newParquetFileWriter(HoodieFileWriterFactory.java:80)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.io.storage.HoodieFileWriterFactory.newParquetFileWriter(HoodieFileWriterFactory.java:67)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.io.storage.HoodieFileWriterFactory.getFileWriter(HoodieFileWriterFactory.java:53)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.io.HoodieWriteHandle.createNewFileWriter(HoodieWriteHandle.java:257)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.io.HoodieMergeHandle.init(HoodieMergeHandle.java:186)
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.io.HoodieMergeHandle.<init>(HoodieMergeHandle.java:136)
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.table.HoodieFlinkCopyOnWriteTable.getUpdateHandle(HoodieFlinkCopyOnWriteTable.java:395)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.table.HoodieFlinkCopyOnWriteTable.handleUpdate(HoodieFlinkCopyOnWriteTable.java:358)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.table.action.compact.HoodieCompactor.compact(HoodieCompactor.java:197)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.sink.compact.CompactFunction.doCompaction(CompactFunction.java:104)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.sink.compact.CompactFunction.lambda$processElement$0(CompactFunction.java:92)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> org.apache.hudi.sink.utils.NonThrownExecutor.lambda$execute$0(NonThrownExecutor.java:93)
>
> ~[blob_p-1b4939f2f6d2432512518a3c63d896f6a98801a6-2e9b5fd3f311ce685d0715e6e0382fd2:0.10.1]
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> [?:1.8.0_261] at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> [?:1.8.0_261] at java.lang.Thread.run(Thread.java:748) [?:1.8.0_261]
> {code}
> The stack is from version 0.10.1. The root cause of the exception is:
> FileSystem provided by hadoop caches FileSystem instances globally.
> In flink taskmanager process, the FileSystem class is loaded by
> AppClassLoader,
> and HoodieWrapperFileSystem is loaded by ChildFirstClassLoader.
> When Flink jobs recover from a failure (which is normal in k8s when the pod
> is evicted), the ChildFirstClassLoader will change, but the AppClassLoader
> stays unchanged (as do the cached HoodieWrapperFileSystem instances). After
> recovery, when the cached instance is returned and cast to a
> different class, the exception is raised.
--
This message was sent by Atlassian Jira
(v8.20.10#820010)