[
https://issues.apache.org/jira/browse/FLINK-30915?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
Samrat Deb updated FLINK-30915:
-------------------------------
Description:
"flink-fs-s3-hadoop" connectors were not able to find the credentials just fine
when configured with the {{WebIdentityTokenCredentialsProvider. }}
When I try to use Flink's S3 connector I get an access-denied error, so I made
sure to set the correct identity provider in my flink-conf, which was set to
the following:
{code:java}
hadoop.fs.s3a.aws.credentials.provider:
"com.amazonaws.auth.WebIdentityTokenCredentialsProvider"{code}
{code:java}
2023-02-02 21:02:06,214 INFO
akka.remote.RemoteActorRefProvider$RemotingTerminator [] - Remoting shut
down.
2023-02-02 21:02:06,293 ERROR
org.apache.flink.runtime.entrypoint.ClusterEntrypoint [] - Could not
start cluster entrypoint KubernetesApplicationClusterEntrypoint.
org.apache.flink.runtime.entrypoint.ClusterEntrypointException: Failed to
initialize the cluster entrypoint KubernetesApplicationClusterEntrypoint.
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:255)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runClusterEntrypoint(ClusterEntrypoint.java:729)
[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.kubernetes.entrypoint.KubernetesApplicationClusterEntrypoint.main(KubernetesApplicationClusterEntrypoint.java:86)
[flink-dist-1.16.0.jar:1.16.0]
Caused by: org.apache.flink.util.FlinkException: Could not create the ha
services from the instantiated HighAvailabilityServicesFactory
org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory.
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:299)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:285)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createHighAvailabilityServices(HighAvailabilityServicesUtils.java:145)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.createHaServices(ClusterEntrypoint.java:439)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.initializeServices(ClusterEntrypoint.java:382)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runCluster(ClusterEntrypoint.java:282)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.lambda$startCluster$1(ClusterEntrypoint.java:232)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.security.contexts.NoOpSecurityContext.runSecured(NoOpSecurityContext.java:28)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:229)
~[flink-dist-1.16.0.jar:1.16.0]
... 2 more
Caused by: java.nio.file.AccessDeniedException:
s3://<bucket>/flink-ha/basic-example/blob: getFileStatus on
s3://<bucket>/flink-ha/basic-example/blob:
com.amazonaws.services.s3.model.AmazonS3Exception: Access Denied (Service:
Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID:
CXJ8Y79Z8SYTBEFM; S3 Extended Request ID: 1234567/1234567; Proxy: null), S3
Extended Request ID:123454321/123232:AccessDenied
at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:255)
~[?:?]
at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:175)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:3858)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.innerGetFileStatus(S3AFileSystem.java:3688)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem$MkdirOperationCallbacksImpl.probePathStatus(S3AFileSystem.java:3455)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.probePathStatusOrNull(MkdirOperation.java:135)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.getPathStatusExpectingDir(MkdirOperation.java:150)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:80)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:45)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.ExecutingStoreOperation.apply(ExecutingStoreOperation.java:76)
~[?:?]
at
org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.lambda$trackDurationOfOperation$5(IOStatisticsBinding.java:499)
~[?:?]
at
org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration(IOStatisticsBinding.java:444)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2337)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2356)
~[?:?]
at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:3428)
~[?:?]
at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2449) ~[?:?]
at
org.apache.flink.fs.s3hadoop.common.HadoopFileSystem.mkdirs(HadoopFileSystem.java:183)
~[?:?]
at
org.apache.flink.core.fs.PluginFileSystemFactory$ClassLoaderFixingFileSystem.mkdirs(PluginFileSystemFactory.java:162)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.blob.FileSystemBlobStore.<init>(FileSystemBlobStore.java:64)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.blob.BlobUtils.createFileSystemBlobStore(BlobUtils.java:108)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.blob.BlobUtils.createBlobStoreFromConfig(BlobUtils.java:86)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory.createHAServices(KubernetesHaServicesFactory.java:41)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:296)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:285)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createHighAvailabilityServices(HighAvailabilityServicesUtils.java:145)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.createHaServices(ClusterEntrypoint.java:439)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.initializeServices(ClusterEntrypoint.java:382)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runCluster(ClusterEntrypoint.java:282)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.lambda$startCluster$1(ClusterEntrypoint.java:232)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.security.contexts.NoOpSecurityContext.runSecured(NoOpSecurityContext.java:28)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:229)
~[flink-dist-1.16.0.jar:1.16.0]
... 2 more{code}
was:
"flink-fs-s3-hadoop" connectors were not able to find the credentials just fine
when configured with the {{WebIdentityTokenCredentialsProvider. }}
{{}}
when I try to use Flinks s3 connector I get access denied, so then I made sure
to set the correct identity provider in my flink-conf, which was set to the
following
{{}}
{{hadoop.fs.s3a.aws.credentials.provider:
"com.amazonaws.auth.WebIdentityTokenCredentialsProvider"}}
{{}}{{}}
{code:java}
2023-02-02 21:02:06,214 INFO
akka.remote.RemoteActorRefProvider$RemotingTerminator [] - Remoting shut
down.
2023-02-02 21:02:06,293 ERROR
org.apache.flink.runtime.entrypoint.ClusterEntrypoint [] - Could not
start cluster entrypoint KubernetesApplicationClusterEntrypoint.
org.apache.flink.runtime.entrypoint.ClusterEntrypointException: Failed to
initialize the cluster entrypoint KubernetesApplicationClusterEntrypoint.
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:255)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runClusterEntrypoint(ClusterEntrypoint.java:729)
[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.kubernetes.entrypoint.KubernetesApplicationClusterEntrypoint.main(KubernetesApplicationClusterEntrypoint.java:86)
[flink-dist-1.16.0.jar:1.16.0]
Caused by: org.apache.flink.util.FlinkException: Could not create the ha
services from the instantiated HighAvailabilityServicesFactory
org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory.
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:299)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:285)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createHighAvailabilityServices(HighAvailabilityServicesUtils.java:145)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.createHaServices(ClusterEntrypoint.java:439)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.initializeServices(ClusterEntrypoint.java:382)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runCluster(ClusterEntrypoint.java:282)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.lambda$startCluster$1(ClusterEntrypoint.java:232)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.security.contexts.NoOpSecurityContext.runSecured(NoOpSecurityContext.java:28)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:229)
~[flink-dist-1.16.0.jar:1.16.0]
... 2 more
Caused by: java.nio.file.AccessDeniedException:
s3://<bucket>/flink-ha/basic-example/blob: getFileStatus on
s3://<bucket>/flink-ha/basic-example/blob:
com.amazonaws.services.s3.model.AmazonS3Exception: Access Denied (Service:
Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID:
CXJ8Y79Z8SYTBEFM; S3 Extended Request ID: 1234567/1234567; Proxy: null), S3
Extended Request ID:123454321/123232:AccessDenied
at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:255)
~[?:?]
at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:175)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:3858)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.innerGetFileStatus(S3AFileSystem.java:3688)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem$MkdirOperationCallbacksImpl.probePathStatus(S3AFileSystem.java:3455)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.probePathStatusOrNull(MkdirOperation.java:135)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.getPathStatusExpectingDir(MkdirOperation.java:150)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:80)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:45)
~[?:?]
at
org.apache.hadoop.fs.s3a.impl.ExecutingStoreOperation.apply(ExecutingStoreOperation.java:76)
~[?:?]
at
org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.lambda$trackDurationOfOperation$5(IOStatisticsBinding.java:499)
~[?:?]
at
org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration(IOStatisticsBinding.java:444)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2337)
~[?:?]
at
org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2356)
~[?:?]
at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:3428)
~[?:?]
at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2449) ~[?:?]
at
org.apache.flink.fs.s3hadoop.common.HadoopFileSystem.mkdirs(HadoopFileSystem.java:183)
~[?:?]
at
org.apache.flink.core.fs.PluginFileSystemFactory$ClassLoaderFixingFileSystem.mkdirs(PluginFileSystemFactory.java:162)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.blob.FileSystemBlobStore.<init>(FileSystemBlobStore.java:64)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.blob.BlobUtils.createFileSystemBlobStore(BlobUtils.java:108)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.blob.BlobUtils.createBlobStoreFromConfig(BlobUtils.java:86)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory.createHAServices(KubernetesHaServicesFactory.java:41)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:296)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:285)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createHighAvailabilityServices(HighAvailabilityServicesUtils.java:145)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.createHaServices(ClusterEntrypoint.java:439)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.initializeServices(ClusterEntrypoint.java:382)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runCluster(ClusterEntrypoint.java:282)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.lambda$startCluster$1(ClusterEntrypoint.java:232)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.security.contexts.NoOpSecurityContext.runSecured(NoOpSecurityContext.java:28)
~[flink-dist-1.16.0.jar:1.16.0]
at
org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:229)
~[flink-dist-1.16.0.jar:1.16.0]
... 2 more{code}
> `flink-fs-hadoop-s3` connector is unable to find IRSA credentials
> -----------------------------------------------------------------
>
> Key: FLINK-30915
> URL: https://issues.apache.org/jira/browse/FLINK-30915
> Project: Flink
> Issue Type: Improvement
> Affects Versions: 1.16.0
> Reporter: Samrat Deb
> Priority: Major
>
>
> "flink-fs-s3-hadoop" connectors were not able to find the credentials just
> fine when configured with the {{WebIdentityTokenCredentialsProvider. }}
>
> When I try to use Flink's S3 connector I get an access-denied error, so I
> made sure to set the correct identity provider in my flink-conf, which was
> set to the following:
>
>
> {code:java}
> hadoop.fs.s3a.aws.credentials.provider:
> "com.amazonaws.auth.WebIdentityTokenCredentialsProvider"{code}
> {code:java}
> 2023-02-02 21:02:06,214 INFO
> akka.remote.RemoteActorRefProvider$RemotingTerminator [] - Remoting
> shut down.
> 2023-02-02 21:02:06,293 ERROR
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint [] - Could not
> start cluster entrypoint KubernetesApplicationClusterEntrypoint.
> org.apache.flink.runtime.entrypoint.ClusterEntrypointException: Failed to
> initialize the cluster entrypoint KubernetesApplicationClusterEntrypoint.
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:255)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runClusterEntrypoint(ClusterEntrypoint.java:729)
> [flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.kubernetes.entrypoint.KubernetesApplicationClusterEntrypoint.main(KubernetesApplicationClusterEntrypoint.java:86)
> [flink-dist-1.16.0.jar:1.16.0]
> Caused by: org.apache.flink.util.FlinkException: Could not create the ha
> services from the instantiated HighAvailabilityServicesFactory
> org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory.
> at
> org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:299)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:285)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createHighAvailabilityServices(HighAvailabilityServicesUtils.java:145)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.createHaServices(ClusterEntrypoint.java:439)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.initializeServices(ClusterEntrypoint.java:382)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runCluster(ClusterEntrypoint.java:282)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.lambda$startCluster$1(ClusterEntrypoint.java:232)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.security.contexts.NoOpSecurityContext.runSecured(NoOpSecurityContext.java:28)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:229)
> ~[flink-dist-1.16.0.jar:1.16.0]
> ... 2 more
> Caused by: java.nio.file.AccessDeniedException:
> s3://<bucket>/flink-ha/basic-example/blob: getFileStatus on
> s3://<bucket>/flink-ha/basic-example/blob:
> com.amazonaws.services.s3.model.AmazonS3Exception: Access Denied (Service:
> Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID:
> CXJ8Y79Z8SYTBEFM; S3 Extended Request ID: 1234567/1234567; Proxy: null), S3
> Extended Request ID:123454321/123232:AccessDenied
> at
> org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:255) ~[?:?]
> at
> org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:175) ~[?:?]
> at
> org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:3858)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.S3AFileSystem.innerGetFileStatus(S3AFileSystem.java:3688)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.S3AFileSystem$MkdirOperationCallbacksImpl.probePathStatus(S3AFileSystem.java:3455)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.impl.MkdirOperation.probePathStatusOrNull(MkdirOperation.java:135)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.impl.MkdirOperation.getPathStatusExpectingDir(MkdirOperation.java:150)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:80)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:45)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.impl.ExecutingStoreOperation.apply(ExecutingStoreOperation.java:76)
> ~[?:?]
> at
> org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.lambda$trackDurationOfOperation$5(IOStatisticsBinding.java:499)
> ~[?:?]
> at
> org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration(IOStatisticsBinding.java:444)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2337)
> ~[?:?]
> at
> org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2356)
> ~[?:?]
> at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:3428)
> ~[?:?]
> at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2449) ~[?:?]
> at
> org.apache.flink.fs.s3hadoop.common.HadoopFileSystem.mkdirs(HadoopFileSystem.java:183)
> ~[?:?]
> at
> org.apache.flink.core.fs.PluginFileSystemFactory$ClassLoaderFixingFileSystem.mkdirs(PluginFileSystemFactory.java:162)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.blob.FileSystemBlobStore.<init>(FileSystemBlobStore.java:64)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.blob.BlobUtils.createFileSystemBlobStore(BlobUtils.java:108)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.blob.BlobUtils.createBlobStoreFromConfig(BlobUtils.java:86)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.kubernetes.highavailability.KubernetesHaServicesFactory.createHAServices(KubernetesHaServicesFactory.java:41)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:296)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createCustomHAServices(HighAvailabilityServicesUtils.java:285)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils.createHighAvailabilityServices(HighAvailabilityServicesUtils.java:145)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.createHaServices(ClusterEntrypoint.java:439)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.initializeServices(ClusterEntrypoint.java:382)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.runCluster(ClusterEntrypoint.java:282)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.lambda$startCluster$1(ClusterEntrypoint.java:232)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.security.contexts.NoOpSecurityContext.runSecured(NoOpSecurityContext.java:28)
> ~[flink-dist-1.16.0.jar:1.16.0]
> at
> org.apache.flink.runtime.entrypoint.ClusterEntrypoint.startCluster(ClusterEntrypoint.java:229)
> ~[flink-dist-1.16.0.jar:1.16.0]
> ... 2 more{code}
--
This message was sent by Atlassian Jira
(v8.20.10#820010)