Github user rmetzger commented on a diff in the pull request:

    https://github.com/apache/flink/pull/2580#discussion_r82576069
  
    --- Diff: flink-streaming-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java ---
    @@ -208,6 +207,235 @@ public void runFailOnNoBrokerTest() throws Exception {
                        }
                }
        }
    +
    +   /**
    +    * Ensures that the committed offsets to Kafka are the offsets of "the next record to process"
    +    */
    +   public void runCommitOffsetsToKafka() throws Exception {
    +           // 3 partitions with 50 records each (0-49, so the expected commit offset of each partition should be 50)
    +           final int parallelism = 3;
    +           final int recordsInEachPartition = 50;
    +
    +           final String topicName = writeSequence("testCommitOffsetsToKafkaTopic", recordsInEachPartition, parallelism, 1);
    +
    +           final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    +           env.getConfig().disableSysoutLogging();
    +           env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    +           env.setParallelism(parallelism);
    +           env.enableCheckpointing(200);
    +
    +           DataStream<String> stream = env.addSource(kafkaServer.getConsumer(topicName, new SimpleStringSchema(), standardProps));
    +           stream.addSink(new DiscardingSink<String>());
    +
    +           final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    +           final Thread runner = new Thread("runner") {
    +                   @Override
    +                   public void run() {
    +                           try {
    +                                   env.execute();
    +                           }
    +                           catch (Throwable t) {
    +                                   if (!(t.getCause() instanceof JobCancellationException)) {
    +                                           errorRef.set(t);
    +                                   }
    +                           }
    +                   }
    +           };
    +           runner.start();
    +
    +           final Long l50 = 50L; // the final committed offset in Kafka should be 50
    +           final long deadline = 30000 + System.currentTimeMillis();
    +
    +           KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler(standardProps);
    +
    +           do {
    +                   Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
    +                   Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
    +                   Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);
    +
    +                   if (l50.equals(o1) && l50.equals(o2) && l50.equals(o3)) {
    +                           break;
    +                   }
    +
    +                   Thread.sleep(100);
    +           }
    +           while (System.currentTimeMillis() < deadline);
    +
    +           // cancel the job
    +           JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));
    +
    +           final Throwable t = errorRef.get();
    +           if (t != null) {
    +                   throw new RuntimeException("Job failed with an exception", t);
    +           }
    +
    +           // final check to see if offsets are correctly in Kafka
    +           Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
    +           Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
    +           Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);
    +           Assert.assertEquals(Long.valueOf(50L), o1);
    +           Assert.assertEquals(Long.valueOf(50L), o2);
    +           Assert.assertEquals(Long.valueOf(50L), o3);
    +
    +           kafkaOffsetHandler.close();
    +           deleteTestTopic(topicName);
    +   }
    +
    +   /**
    +    * This test first writes a total of 200 records to a test topic, reads the first 100 so that some offsets are
    --- End diff ---
    
    These numbers should be 300 and 150.
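    
    To illustrate the "next record to process" convention that this test asserts, here is a minimal standalone sketch against the plain Kafka consumer API (it is not part of the test base; the broker address, group id, and class name are assumptions made only for this example):
    
        import java.util.Collections;
        import java.util.Properties;
        
        import org.apache.kafka.clients.consumer.KafkaConsumer;
        import org.apache.kafka.clients.consumer.OffsetAndMetadata;
        import org.apache.kafka.common.TopicPartition;
        
        public class CommittedOffsetConventionExample {
        
            public static void main(String[] args) {
                Properties props = new Properties();
                props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
                props.setProperty("group.id", "offset-convention-demo");  // hypothetical consumer group
                props.setProperty("key.deserializer",
                        "org.apache.kafka.common.serialization.StringDeserializer");
                props.setProperty("value.deserializer",
                        "org.apache.kafka.common.serialization.StringDeserializer");
        
                try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                    TopicPartition partition = new TopicPartition("testCommitOffsetsToKafkaTopic", 0);
                    consumer.assign(Collections.singletonList(partition));
        
                    // After records 0..49 of this partition have been processed, the offset to
                    // commit is 50: the position of the next record to read, not the offset of
                    // the last record that was processed.
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(50L)));
        
                    OffsetAndMetadata committed = consumer.committed(partition);
                    System.out.println("committed offset: " + committed.offset()); // prints 50
                }
            }
        }
    
    This is the same convention the assertions above rely on: 50 records per partition (offsets 0-49) lead to an expected committed offset of exactly 50 for each of the three partitions.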

