[ 
https://issues.apache.org/jira/browse/FLINK-25453?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17860817#comment-17860817
 ] 

Matthias Pohl commented on FLINK-25453:
---------------------------------------

{code}
org.apache.flink.runtime.JobException: Recovery is suppressed by 
NoRestartBackoffTimeStrategy
        at 
org.apache.flink.runtime.executiongraph.failover.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:219)
        at 
org.apache.flink.runtime.executiongraph.failover.ExecutionFailureHandler.handleFailureAndReport(ExecutionFailureHandler.java:166)
        at 
org.apache.flink.runtime.executiongraph.failover.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:121)
        at 
org.apache.flink.runtime.scheduler.DefaultScheduler.recordTaskFailure(DefaultScheduler.java:281)
        at 
org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:272)
        at 
org.apache.flink.runtime.scheduler.adaptivebatch.AdaptiveBatchScheduler.handleTaskFailure(AdaptiveBatchScheduler.java:413)
        at 
org.apache.flink.runtime.scheduler.DefaultScheduler.onTaskFailed(DefaultScheduler.java:265)
        at 
org.apache.flink.runtime.scheduler.adaptivebatch.AdaptiveBatchScheduler.onTaskFailed(AdaptiveBatchScheduler.java:405)
        at 
org.apache.flink.runtime.scheduler.SchedulerBase.onTaskExecutionStateUpdate(SchedulerBase.java:800)
        at 
org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:777)
        at 
org.apache.flink.runtime.scheduler.SchedulerNG.updateTaskExecutionState(SchedulerNG.java:83)
        at 
org.apache.flink.runtime.jobmaster.JobMaster.updateTaskExecutionState(JobMaster.java:515)
        at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.lambda$handleRpcInvocation$1(PekkoRpcActor.java:318)
        at 
org.apache.flink.runtime.concurrent.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:83)
        at 
org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.handleRpcInvocation(PekkoRpcActor.java:316)
        at 
org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.handleRpcMessage(PekkoRpcActor.java:229)
        at 
org.apache.flink.runtime.rpc.pekko.FencedPekkoRpcActor.handleRpcMessage(FencedPekkoRpcActor.java:88)
        at 
org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.handleMessage(PekkoRpcActor.java:174)
        at 
org.apache.pekko.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:33)
        at 
org.apache.pekko.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:29)
        at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
        at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
        at 
org.apache.pekko.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:29)
        at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
        at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
        at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
        at org.apache.pekko.actor.Actor.aroundReceive(Actor.scala:547)
        at org.apache.pekko.actor.Actor.aroundReceive$(Actor.scala:545)
        at 
org.apache.pekko.actor.AbstractActor.aroundReceive(AbstractActor.scala:229)
        at org.apache.pekko.actor.ActorCell.receiveMessage(ActorCell.scala:590)
        at org.apache.pekko.actor.ActorCell.invoke(ActorCell.scala:557)
        at org.apache.pekko.dispatch.Mailbox.processMailbox(Mailbox.scala:280)
        at org.apache.pekko.dispatch.Mailbox.run(Mailbox.scala:241)
        at org.apache.pekko.dispatch.Mailbox.exec(Mailbox.scala:253)
        at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
        at 
java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
        at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
        at 
java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: java.lang.RuntimeException: One or more fetchers have encountered 
exception
        at 
org.apache.flink.connector.base.source.reader.fetcher.SplitFetcherManager.checkErrors(SplitFetcherManager.java:333)
        at 
org.apache.flink.connector.base.source.reader.SourceReaderBase.getNextFetch(SourceReaderBase.java:228)
        at 
org.apache.flink.connector.base.source.reader.SourceReaderBase.pollNext(SourceReaderBase.java:190)
        at 
org.apache.flink.streaming.api.operators.SourceOperator.emitNext(SourceOperator.java:422)
        at 
org.apache.flink.streaming.runtime.io.StreamTaskSourceInput.emitNext(StreamTaskSourceInput.java:68)
        at 
org.apache.flink.streaming.runtime.io.StreamOneInputProcessor.processInput(StreamOneInputProcessor.java:65)
        at 
org.apache.flink.streaming.runtime.tasks.StreamTask.processInput(StreamTask.java:638)
        at 
org.apache.flink.streaming.runtime.tasks.mailbox.MailboxProcessor.runMailboxLoop(MailboxProcessor.java:231)
        at 
org.apache.flink.streaming.runtime.tasks.StreamTask.runMailboxLoop(StreamTask.java:973)
        at 
org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:917)
        at 
org.apache.flink.runtime.taskmanager.Task.runWithSystemExitMonitoring(Task.java:970)
        at 
org.apache.flink.runtime.taskmanager.Task.restoreAndInvoke(Task.java:949)
        at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:763)
        at org.apache.flink.runtime.taskmanager.Task.run(Task.java:575)
        at java.lang.Thread.run(Thread.java:750)
Caused by: java.lang.RuntimeException: SplitFetcher thread 0 received 
unexpected exception while polling the records
        at 
org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.runOnce(SplitFetcher.java:168)
        at 
org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.run(SplitFetcher.java:117)
        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        ... 1 more
Caused by: java.io.FileNotFoundException: File 
file:/tmp/junit1080802548326386161/filesystem_catalog_1/test_db/my_materialized_table_in_full_mode/data/ds=2024-06-27/part-17c8d491-7cee-4b52-8b71-a87cf952f053-task-0-file-0
 does not exist or the user running Flink ('vsts') has insufficient permissions 
to access it.
        at 
org.apache.flink.core.fs.local.LocalFileSystem.getFileStatus(LocalFileSystem.java:106)
        at 
org.apache.flink.api.common.io.FileInputFormat.open(FileInputFormat.java:849)
        at 
org.apache.flink.api.common.io.DelimitedInputFormat.open(DelimitedInputFormat.java:499)
        at 
org.apache.flink.connector.file.table.DeserializationSchemaAdapter$LineBytesInputFormat.open(DeserializationSchemaAdapter.java:175)
        at 
org.apache.flink.connector.file.table.DeserializationSchemaAdapter$Reader.<init>(DeserializationSchemaAdapter.java:112)
        at 
org.apache.flink.connector.file.table.DeserializationSchemaAdapter$Reader.<init>(DeserializationSchemaAdapter.java:105)
        at 
org.apache.flink.connector.file.table.DeserializationSchemaAdapter.createReader(DeserializationSchemaAdapter.java:85)
        at 
org.apache.flink.connector.file.table.DeserializationSchemaAdapter.createReader(DeserializationSchemaAdapter.java:49)
        at 
org.apache.flink.connector.file.table.FileInfoExtractorBulkFormat.createReader(FileInfoExtractorBulkFormat.java:109)
        at 
org.apache.flink.connector.file.src.impl.FileSourceSplitReader.checkSplitOrStartNext(FileSourceSplitReader.java:112)
        at 
org.apache.flink.connector.file.src.impl.FileSourceSplitReader.fetch(FileSourceSplitReader.java:65)
        at 
org.apache.flink.connector.base.source.reader.fetcher.FetchTask.run(FetchTask.java:58)
        at 
org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.runOnce(SplitFetcher.java:165)
        ... 6 more
{code}
https://dev.azure.com/apache-flink/apache-flink/_build/results?buildId=60530&view=logs&j=af184cdd-c6d8-5084-0b69-7e9c67b35f7a&t=0f3adb59-eefa-51c6-2858-3654d9e0749d&l=15251

> LocalFileSystem#listStatus throws FileNotFoundException when a file is 
> deleted concurrently in the directory
> ------------------------------------------------------------------------------------------------------------
>
>                 Key: FLINK-25453
>                 URL: https://issues.apache.org/jira/browse/FLINK-25453
>             Project: Flink
>          Issue Type: Bug
>          Components: API / Core
>    Affects Versions: 1.14.2
>            Reporter: Caizhi Weng
>            Priority: Major
>
> Add the following test case to {{LocalFileSystemTest}} to reproduce this 
> issue.
> {code:java}
> @Test
> public void myTest() throws Exception {
>     temporaryFolder.create();
>     Runnable r =
>             () -> {
>                 try {
>                     Path path = new Path(temporaryFolder.getRoot() + "/" + 
> UUID.randomUUID());
>                     FileSystem fs = path.getFileSystem();
>                     try (FSDataOutputStream out =
>                             fs.create(path, 
> FileSystem.WriteMode.NO_OVERWRITE)) {
>                         OutputStreamWriter writer =
>                                 new OutputStreamWriter(out, 
> StandardCharsets.UTF_8);
>                         writer.write("test");
>                         writer.flush();
>                     }
>                     Thread.sleep(ThreadLocalRandom.current().nextInt(100));
>                     fs.listStatus(new 
> Path(temporaryFolder.getRoot().toString()));
>                     Thread.sleep(ThreadLocalRandom.current().nextInt(100));
>                     fs.delete(path, false);
>                 } catch (Exception e) {
>                     e.printStackTrace();
>                 }
>             };
>     List<Thread> threads = new ArrayList<>();
>     for (int i = 0; i < 100; i++) {
>         Thread thread = new Thread(r);
>         thread.start();
>         threads.add(thread);
>     }
>     for (Thread thread : threads) {
>         thread.join();
>     }
> }
> {code}
> Exception stack
> {code}
> java.io.FileNotFoundException: File 
> /var/folders/y9/hqm_j18s105g5n8_xq00rd7c0000gp/T/junit8680341925762938456/f3b7f8a3-7092-464a-af7e-e7f8465c041d
>  does not exist or the user running Flink ('tsreaper') has insufficient 
> permissions to access it.
>       at 
> org.apache.flink.core.fs.local.LocalFileSystem.getFileStatus(LocalFileSystem.java:106)
>       at 
> org.apache.flink.core.fs.local.LocalFileSystem.listStatus(LocalFileSystem.java:167)
>       at 
> org.apache.flink.core.fs.local.LocalFileSystemTest.lambda$myTest$0(LocalFileSystemTest.java:90)
>       at java.lang.Thread.run(Thread.java:748)
> {code}
> This is because {{listStatus}} is not atomic. {{LocalFileSystem}} will first 
> get all file names in the directory and query for the status of each file. If 
> that file is removed after the start of {{listStatus}} but before the query, 
> {{FileNotFoundException}} will be thrown.
> Hadoop's {{RawLocalFileSystem}} handles this by ignoring 
> {{FileNotFoundException}}.
> {code:java}
> for (int i = 0; i < names.length; i++) {
>   try {
>     // Assemble the path using the Path 3 arg constructor to make sure
>     // paths with colon are properly resolved on Linux
>     results[j] = getFileStatus(new Path(f, new Path(null, null, names[i])));
>     j++;
>   } catch (FileNotFoundException e) {
> >     // ignore the files not found since the dir list may have changed
>     // since the names[] list was generated.
>   }
> }
> {code}



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

Reply via email to