[ https://issues.apache.org/jira/browse/FLINK-30111?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Danny Cranmer reassigned FLINK-30111:
-------------------------------------
Assignee: Samrat Deb
> CacheRead fails with Intermediate data set with ID not found
> ------------------------------------------------------------
>
> Key: FLINK-30111
> URL: https://issues.apache.org/jira/browse/FLINK-30111
> Project: Flink
> Issue Type: Bug
> Affects Versions: 1.16.0
> Reporter: Prabhu Joseph
> Assignee: Samrat Deb
> Priority: Major
>
> CacheRead fails with the exception below when running multiple parallel jobs in
> detached mode that all read from the same CachedDataStream. The same
> application runs fine either in attached mode or when the cache is not used.
> {code:java}
> 2022-11-21 08:19:31,762 INFO  org.apache.flink.runtime.executiongraph.ExecutionGraph [] - CacheRead -> Map -> Sink: Writer (1/1) (8002916773ad489098a05e6835288f29_cbc357ccb763df2852fee8c4fc7d55f2_0_0) switched from RUNNING to FAILED on container_1668960408356_0009_01_000009 @ ip-172-31-38-144.us-west-2.compute.internal (dataPort=38433).
> java.lang.IllegalArgumentException: Intermediate data set with ID f0d8150945d3e396b8c0a4f6a527a8ce not found.
>     at org.apache.flink.runtime.scheduler.ExecutionGraphHandler.requestPartitionState(ExecutionGraphHandler.java:173) ~[flink-dist-1.16.0.jar:1.16.0]
>     at org.apache.flink.runtime.scheduler.SchedulerBase.requestPartitionState(SchedulerBase.java:763) ~[flink-dist-1.16.0.jar:1.16.0]
>     at org.apache.flink.runtime.jobmaster.JobMaster.requestPartitionState(JobMaster.java:515) ~[flink-dist-1.16.0.jar:1.16.0]
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_342]
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_342]
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_342]
>     at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_342]
>     at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRpcInvocation$1(AkkaRpcActor.java:309) ~[flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:83) ~[flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcInvocation(AkkaRpcActor.java:307) ~[flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:222) ~[flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:84) ~[flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:168) ~[flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at scala.PartialFunction.applyOrElse(PartialFunction.scala:123) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at scala.PartialFunction.applyOrElse$(PartialFunction.scala:122) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:171) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:172) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:172) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.actor.Actor.aroundReceive(Actor.scala:537) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.actor.Actor.aroundReceive$(Actor.scala:535) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.actor.ActorCell.receiveMessage(ActorCell.scala:580) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.actor.ActorCell.invoke(ActorCell.scala:548) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.dispatch.Mailbox.run(Mailbox.scala:231) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at akka.dispatch.Mailbox.exec(Mailbox.scala:243) [flink-rpc-akka_e220e3c9-e81e-4259-9655-37a1f83e8a36.jar:1.16.0]
>     at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289) [?:1.8.0_342]
>     at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056) [?:1.8.0_342]
>     at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692) [?:1.8.0_342]
>     at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175) [?:1.8.0_342]
> {code}
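>
> For context, here is a minimal self-contained sketch of the failing lookup (hypothetical names and toy types, not the actual Flink source): the scheduler resolves a partition-state request against the intermediate data sets registered in the current job's execution graph, so a cached data set produced by an earlier job cannot be found there.
> {code:java}
> import java.util.HashMap;
> import java.util.Map;
>
> // Hedged sketch with hypothetical names; the real logic lives in
> // ExecutionGraphHandler.requestPartitionState (see the trace above).
> public class PartitionLookupSketch {
>     // Only the intermediate data sets of the *current* job are registered here.
>     static final Map<String, Object> intermediateResultsOfCurrentJob = new HashMap<>();
>
>     static Object requestPartitionState(String intermediateDataSetId) {
>         Object result = intermediateResultsOfCurrentJob.get(intermediateDataSetId);
>         if (result == null) {
>             // Mirrors the message seen in the log above.
>             throw new IllegalArgumentException(
>                     "Intermediate data set with ID " + intermediateDataSetId + " not found.");
>         }
>         return result;
>     }
>
>     public static void main(String[] args) {
>         // The cached data set was produced by a previous job, so it is absent here.
>         requestPartitionState("f0d8150945d3e396b8c0a4f6a527a8ce");
>     }
> }
> {code}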
> *Repro:*
> {code:java}
> import org.apache.flink.api.common.RuntimeExecutionMode;
> import org.apache.flink.api.common.eventtime.WatermarkStrategy;
> import org.apache.flink.api.common.serialization.SimpleStringEncoder;
> import org.apache.flink.connector.file.sink.FileSink;
> import org.apache.flink.connector.file.src.FileSource;
> import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
> import org.apache.flink.core.fs.Path;
> import org.apache.flink.streaming.api.datastream.CachedDataStream;
> import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
> import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
>
> import java.io.File;
>
> public class CacheTester {
>     public static void main(String[] args) throws Exception {
>         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
>         env.setRuntimeMode(RuntimeExecutionMode.BATCH);
>
>         File datafile1 = new File("/tmp/FLINK/input1");
>         FileSource<String> source =
>                 FileSource.forRecordStreamFormat(
>                                 new TextLineInputFormat(), new Path(datafile1.getPath()))
>                         .build();
>
>         // The first execute() materializes the cache; every later job reads from it.
>         final CachedDataStream<Integer> cachedDataStream =
>                 env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
>                         .map(i -> Integer.parseInt(i) + 1)
>                         .cache();
>
>         for (int i = 0; i < 100; i++) {
>             SingleOutputStreamOperator<Integer> out2 = cachedDataStream.map(j -> j + 1);
>             File outputFile = new File("/tmp/FLINK/output" + i);
>             FileSink<Integer> fileSink =
>                     FileSink.forRowFormat(
>                                     new Path(outputFile.getPath()),
>                                     new SimpleStringEncoder<Integer>())
>                             .build();
>             out2.sinkTo(fileSink);
>             env.execute();
>             try { Thread.sleep(2); } catch (Exception e) { }
>         }
>     }
> }
> {code}
> Input data and job submission:
> {code}
> [root@ip-172-31-38-144 container_1668960408356_0008_01_000001]# cat /tmp/FLINK/input1
> 1
> 2
> 3
> flink-yarn-session -d
> flink run -d -c CacheTester cachetester.jar
> {code}
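>
> As noted above, the same application runs fine in attached mode; assuming the same jar and entry class, that submission (simply dropping -d from flink run) would be:
> {code}
> flink run -c CacheTester cachetester.jar
> {code}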
--
This message was sent by Atlassian Jira
(v8.20.10#820010)