Github user srdo commented on a diff in the pull request:
https://github.com/apache/storm/pull/2466#discussion_r158641898
--- Diff: external/storm-kafka-client/src/test/java/org/apache/storm/kafka/spout/KafkaSpoutSingleTopicTest.java ---
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.spout;
+
+
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.storm.kafka.spout.config.builder.SingleTopicKafkaSpoutConfiguration;
+import org.apache.storm.tuple.Values;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.mockito.Matchers.anyListOf;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.kafka.common.TopicPartition;
+import org.apache.storm.utils.Time;
+import org.mockito.Captor;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyList;
+import static org.mockito.Matchers.anyString;
+
+import java.util.regex.Pattern;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.hamcrest.Matchers;
+
+public class KafkaSpoutSingleTopicTest extends KafkaSpoutAbstractTest {
+    @Captor
+    private ArgumentCaptor<Map<TopicPartition, OffsetAndMetadata>> commitCapture;
+    private final int maxPollRecords = 10;
+    private final int maxRetries = 3;
+
+    public KafkaSpoutSingleTopicTest() {
+        super(2_000);
+    }
+
+    @Override
+    KafkaSpoutConfig<String, String> createSpoutConfig() {
+        return SingleTopicKafkaSpoutConfiguration.setCommonSpoutConfig(
+            KafkaSpoutConfig.builder("127.0.0.1:" + kafkaUnitRule.getKafkaUnit().getKafkaPort(),
+                Pattern.compile(SingleTopicKafkaSpoutConfiguration.TOPIC)))
+            .setOffsetCommitPeriodMs(commitOffsetPeriodMs)
+            .setRetry(new KafkaSpoutRetryExponentialBackoff(KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(0), KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(0),
+                maxRetries, KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(0)))
+            .setProp(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords)
+            .build();
+    }
+
+    @Test
+    public void testSeekToCommittedOffsetIfConsumerPositionIsBehindWhenCommitting() throws Exception {
+        final int messageCount = maxPollRecords * 2;
+        prepareSpout(messageCount);
+
+        //Emit all messages and fail the first one while acking the rest
+        for (int i = 0; i < messageCount; i++) {
+            spout.nextTuple();
+        }
+        ArgumentCaptor<KafkaSpoutMessageId> messageIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
+        verify(collector, times(messageCount)).emit(anyString(), anyList(), messageIdCaptor.capture());
+        List<KafkaSpoutMessageId> messageIds = messageIdCaptor.getAllValues();
+        for (int i = 1; i < messageIds.size(); i++) {
+            spout.ack(messageIds.get(i));
+        }
+        KafkaSpoutMessageId failedTuple = messageIds.get(0);
+        spout.fail(failedTuple);
+
+        //Advance the time and replay the failed tuple.
+        reset(collector);
+        spout.nextTuple();
+        ArgumentCaptor<KafkaSpoutMessageId> failedIdReplayCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
+        verify(collector).emit(anyString(), anyList(), failedIdReplayCaptor.capture());
+
+        assertThat("Expected replay of failed tuple", failedIdReplayCaptor.getValue(), is(failedTuple));
+
+        /* Ack the tuple, and commit.
--- End diff ---
Nit: Indentation on this is wrong.
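
For reference, a minimal sketch of the alignment the nit is pointing at; the continuation lines of this block comment are not shown in the hunk above, so the text below is a placeholder, not quoted from the patch:

        /* Ack the tuple, and commit.
         * Continuation lines are conventionally aligned so each asterisk sits
         * one column in from the opening slash, at the same indent as the
         * surrounding code, with the closing delimiter on its own line.
         */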
---