[ https://issues.apache.org/jira/browse/FLINK-29032?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Martijn Visser closed FLINK-29032.
----------------------------------
Resolution: Duplicate
I believe this is a duplicate.
> Kafka consume from timestamp throws exception: Caused by: java.lang.IllegalArgumentException: Invalid negative offset
> ---------------------------------------------------------------------------------------------------------------------
>
> Key: FLINK-29032
> URL: https://issues.apache.org/jira/browse/FLINK-29032
> Project: Flink
> Issue Type: Bug
> Components: Connectors / Kafka
> Affects Versions: 1.15.1
> Reporter: HunterXHunter
> Priority: Major
>
> /*+ OPTIONS(
>     'scan.startup.mode' = 'timestamp',
>     'scan.startup.timestamp-millis' = '1660809660000'
> ) */;
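> For reference, a minimal sketch of the equivalent DataStream configuration; the topic, bootstrap servers, and group id are made-up placeholders, and only the timestamp value is taken from the hint above. The 'timestamp' startup mode corresponds to OffsetsInitializer.timestamp(), which goes through the TimestampOffsetsInitializer seen in the stack trace below.
> {code:java}
> import org.apache.flink.api.common.eventtime.WatermarkStrategy;
> import org.apache.flink.api.common.serialization.SimpleStringSchema;
> import org.apache.flink.connector.kafka.source.KafkaSource;
> import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
> import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
>
> public class TimestampStartupSketch {
>     public static void main(String[] args) throws Exception {
>         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
>         // Placeholder topic/servers/group; the timestamp matches 'scan.startup.timestamp-millis' above.
>         KafkaSource<String> source = KafkaSource.<String>builder()
>                 .setBootstrapServers("localhost:9092")
>                 .setTopics("input-topic")
>                 .setGroupId("repro-group")
>                 .setStartingOffsets(OffsetsInitializer.timestamp(1660809660000L))
>                 .setValueOnlyDeserializer(new SimpleStringSchema())
>                 .build();
>         env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source").print();
>         env.execute("timestamp-startup-repro");
>     }
> }
> {code}
> With this startup mode, split initialization in the enumerator fails as follows: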
> {code:java}
> org.apache.flink.util.FlinkRuntimeException: Failed to initialize partition splits due to
>     at org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumerator.handlePartitionSplitChanges(KafkaSourceEnumerator.java:299) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.runtime.source.coordinator.ExecutorNotifier.lambda$null$1(ExecutorNotifier.java:83) ~[flink-dist-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.util.ThrowableCatchingRunnable.run(ThrowableCatchingRunnable.java:40) [flink-dist-1.15.1-vip.jar:1.15.1-vip]
>     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) [?:1.8.0_201]
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266) [?:1.8.0_201]
>     at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180) [?:1.8.0_201]
>     at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293) [?:1.8.0_201]
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_201]
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_201]
>     at java.lang.Thread.run(Thread.java:748) [?:1.8.0_201]
> Caused by: java.lang.IllegalArgumentException: Invalid negative offset
>     at org.apache.flink.kafka.shaded.org.apache.kafka.clients.consumer.OffsetAndTimestamp.<init>(OffsetAndTimestamp.java:36) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumerator$PartitionOffsetsRetrieverImpl.lambda$offsetsForTimes$8(KafkaSourceEnumerator.java:622) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at java.util.stream.Collectors.lambda$toMap$58(Collectors.java:1321) ~[?:1.8.0_201]
>     at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) ~[?:1.8.0_201]
>     at java.util.HashMap$EntrySpliterator.forEachRemaining(HashMap.java:1699) ~[?:1.8.0_201]
>     at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:481) ~[?:1.8.0_201]
>     at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:471) ~[?:1.8.0_201]
>     at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:708) ~[?:1.8.0_201]
>     at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:1.8.0_201]
>     at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:499) ~[?:1.8.0_201]
>     at org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumerator$PartitionOffsetsRetrieverImpl.offsetsForTimes(KafkaSourceEnumerator.java:615) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.connector.kafka.source.enumerator.initializer.TimestampOffsetsInitializer.getPartitionOffsets(TimestampOffsetsInitializer.java:57) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumerator.initializePartitionSplits(KafkaSourceEnumerator.java:272) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumerator.lambda$checkPartitionChanges$0(KafkaSourceEnumerator.java:242) ~[flink-sql-connector-kafka-1.15.1-vip.jar:1.15.1-vip]
>     at org.apache.flink.runtime.source.coordinator.ExecutorNotifier.lambda$notifyReadyAsync$2(ExecutorNotifier.java:80) ~[flink-dist-1.15.1-vip.jar:1.15.1-vip]
>     ... 7 more
> {code}
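> As the "Caused by" above shows, the immediate failure is an argument check in the (shaded) kafka-clients OffsetAndTimestamp constructor when the enumerator maps a negative offset into it, likely -1 when no offset is found at or after the requested timestamp. A minimal sketch against the unshaded kafka-clients API, purely to illustrate that check:
> {code:java}
> import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
>
> public class NegativeOffsetSketch {
>     public static void main(String[] args) {
>         // Offset -1 stands in for "no offset found for the requested timestamp";
>         // the constructor rejects any negative offset with
>         // IllegalArgumentException: Invalid negative offset.
>         new OffsetAndTimestamp(-1L, 1660809660000L);
>     }
> }
> {code}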