cadonna commented on code in PR #13756:
URL: https://github.com/apache/kafka/pull/13756#discussion_r1229867769


##########
streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBufferTest.java:
##########
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import org.apache.kafka.common.header.internals.RecordHeaders;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.metrics.Sensor;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.MockTime;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.processor.TaskId;
+import org.apache.kafka.streams.processor.api.Record;
+import org.apache.kafka.streams.processor.internals.InternalProcessorContext;
+import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;
+import org.apache.kafka.streams.processor.internals.SerdeGetter;
+import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl;
+import org.apache.kafka.test.MockInternalNewProcessorContext;
+import org.apache.kafka.test.StreamsTestUtils;
+import org.apache.kafka.test.TestUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicInteger;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.when;
+
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class RocksDBTimeOrderedKeyValueBufferTest {
+    public RocksDBTimeOrderedKeyValueBuffer<String, String> buffer;
+    @Mock
+    public SerdeGetter serdeGetter;
+    public InternalProcessorContext<String, String> context;
+    public StreamsMetricsImpl streamsMetrics;
+    @Mock
+    public Sensor sensor;
+    public long offset;
+
+    @Before
+    public void setUp() {
+        when(serdeGetter.keySerde()).thenReturn(new Serdes.StringSerde());
+        when(serdeGetter.valueSerde()).thenReturn(new Serdes.StringSerde());
+        final Metrics metrics = new Metrics();
+        offset = 0;
+        streamsMetrics = new StreamsMetricsImpl(metrics, "test-client", StreamsConfig.METRICS_LATEST, new MockTime());
+        context = new MockInternalNewProcessorContext<>(StreamsTestUtils.getStreamsConfig(), new TaskId(0, 0), TestUtils.tempDirectory());
+    }
+
+    private void createBuffer(final Duration grace) {
+        final RocksDBTimeOrderedKeyValueBytesStore store = new RocksDBTimeOrderedKeyValueBytesStoreSupplier("testing", 100).get();
+        buffer = new RocksDBTimeOrderedKeyValueBuffer<>(store, grace, "testing");
+        buffer.setSerdesIfNull(serdeGetter);
+        buffer.init((StateStoreContext) context, store);
+    }
+
+    private void pipeRecord(final String key, final String value, final long time) {
+        final Record<String, String> record = new Record<>(key, value, time);
+        context.setRecordContext(new ProcessorRecordContext(time, offset++, 0, "testing", new RecordHeaders()));
+        buffer.put(time, record, context.recordContext());
+    }
+
+    @Test
+    public void shouldPutInBufferAndUpdateFields() {
+        createBuffer(Duration.ofMinutes(1));
+        assertNumSizeAndTimestamp(buffer, 0, Long.MAX_VALUE, 0);
+        pipeRecord("1", "0", 0L);
+        assertNumSizeAndTimestamp(buffer, 1, 0, 42);
+        pipeRecord("3", "0", 2L);
+        assertNumSizeAndTimestamp(buffer, 2, 0, 84);
+

Review Comment:
   ```suggestion
   ```



##########
streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java:
##########
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.processor.ProcessorContext;
+import org.apache.kafka.streams.processor.StateStore;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.processor.api.Record;
+import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;
+import org.apache.kafka.streams.processor.internals.SerdeGetter;
+import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.ValueAndTimestamp;
+
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static java.util.Objects.requireNonNull;
+
+public class RocksDBTimeOrderedKeyValueBuffer<K, V> extends WrappedStateStore<RocksDBTimeOrderedKeyValueBytesStore, Object, Object> implements TimeOrderedKeyValueBuffer<K, V, V> {
+
+    private final long gracePeriod;
+    private long bufferSize;
+    private long minTimestamp;
+    private int numRecords;
+    private Serde<K> keySerde;
+    private Serde<V> valueSerde;
+    private final String topic;
+    private int seqnum;
+
+    public RocksDBTimeOrderedKeyValueBuffer(final RocksDBTimeOrderedKeyValueBytesStore store,
+                                            final Duration gracePeriod,
+                                            final String topic) {
+        super(store);
+        this.gracePeriod = gracePeriod.toMillis();
+        minTimestamp = Long.MAX_VALUE;
+        numRecords = 0;
+        bufferSize = 0;
+        seqnum = 0;
+        this.topic = topic;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void setSerdesIfNull(final SerdeGetter getter) {
+        keySerde = keySerde == null ? (Serde<K>) getter.keySerde() : keySerde;
+        valueSerde = valueSerde == null ? getter.valueSerde() : valueSerde;
+    }
+
+    @Deprecated
+    @Override
+    public void init(final ProcessorContext context, final StateStore root) {
+        wrapped().init(context, wrapped());
+    }
+
+    @Override
+    public void init(final StateStoreContext context, final StateStore root) {
+        wrapped().init(context, wrapped());
+    }
+
+    @Override
+    public void evictWhile(final Supplier<Boolean> predicate, final Consumer<Eviction<K, V>> callback) {
+        KeyValue<Bytes, byte[]> keyValue;
+
+        if (predicate.get()) {
+            try (final KeyValueIterator<Bytes, byte[]> iterator = wrapped()
+                .fetchAll(0, wrapped().observedStreamTime - gracePeriod)) {
+                while (iterator.hasNext() && predicate.get()) {
+                    keyValue = iterator.next();
+
+                    final BufferValue bufferValue = BufferValue.deserialize(ByteBuffer.wrap(keyValue.value));
+                    final K key = keySerde.deserializer().deserialize(topic,
+                        PrefixedWindowKeySchemas.TimeFirstWindowKeySchema.extractStoreKeyBytes(keyValue.key.get()));
+                    minTimestamp = bufferValue.context().timestamp();
+
+                    final V value = valueSerde.deserializer().deserialize(topic, bufferValue.newValue());
+
+                    if (bufferValue.context().timestamp() < minTimestamp) {
+                        throw new IllegalStateException(
+                            "minTimestamp [" + minTimestamp + "] did not match the actual min timestamp [" +
+                                bufferValue.context().timestamp() + "]"
+                        );
+                    }

Review Comment:
   In what situation would this condition be true?
   On line 92, you assign `bufferValue.context().timestamp()` to `minTimestamp` 
and here you verify whether `minTimestamp` is greater than 
`bufferValue.context().timestamp()`.
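   For reference, a minimal sketch of an ordering under which the check could actually fire (a hypothetical rearrangement of the PR's own lines, not its final code): compare against the previously tracked `minTimestamp` before overwriting it.
   ```java
   // Hypothetical reordering: validate first, then update the tracked minimum.
   final long recordTimestamp = bufferValue.context().timestamp();
   if (recordTimestamp < minTimestamp) {
       // The iterator presumably returns records in time order, so a timestamp
       // below the tracked minimum would indicate inconsistent bookkeeping.
       throw new IllegalStateException(
           "minTimestamp [" + minTimestamp + "] did not match the actual min timestamp ["
               + recordTimestamp + "]"
       );
   }
   minTimestamp = recordTimestamp;
   ```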



##########
streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java:
##########
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.processor.ProcessorContext;
+import org.apache.kafka.streams.processor.StateStore;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.processor.api.Record;
+import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;
+import org.apache.kafka.streams.processor.internals.SerdeGetter;
+import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.ValueAndTimestamp;
+
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static java.util.Objects.requireNonNull;
+
+public class RocksDBTimeOrderedKeyValueBuffer<K, V> extends WrappedStateStore<RocksDBTimeOrderedKeyValueBytesStore, Object, Object> implements TimeOrderedKeyValueBuffer<K, V, V> {
+
+    private final long gracePeriod;
+    private long bufferSize;
+    private long minTimestamp;
+    private int numRecords;
+    private Serde<K> keySerde;
+    private Serde<V> valueSerde;
+    private final String topic;
+    private int seqnum;
+
+    public RocksDBTimeOrderedKeyValueBuffer(final RocksDBTimeOrderedKeyValueBytesStore store,
+                                            final Duration gracePeriod,
+                                            final String topic) {
+        super(store);
+        this.gracePeriod = gracePeriod.toMillis();
+        minTimestamp = Long.MAX_VALUE;
+        numRecords = 0;
+        bufferSize = 0;
+        seqnum = 0;
+        this.topic = topic;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void setSerdesIfNull(final SerdeGetter getter) {
+        keySerde = keySerde == null ? (Serde<K>) getter.keySerde() : keySerde;
+        valueSerde = valueSerde == null ? getter.valueSerde() : valueSerde;
+    }
+
+    @Deprecated
+    @Override
+    public void init(final ProcessorContext context, final StateStore root) {
+        wrapped().init(context, wrapped());
+    }
+
+    @Override
+    public void init(final StateStoreContext context, final StateStore root) {
+        wrapped().init(context, wrapped());
+    }
+
+    @Override
+    public void evictWhile(final Supplier<Boolean> predicate, final Consumer<Eviction<K, V>> callback) {
+        KeyValue<Bytes, byte[]> keyValue;
+
+        if (predicate.get()) {
+            try (final KeyValueIterator<Bytes, byte[]> iterator = wrapped()
+                .fetchAll(0, wrapped().observedStreamTime - gracePeriod)) {
+                while (iterator.hasNext() && predicate.get()) {
+                    keyValue = iterator.next();
+
+                    final BufferValue bufferValue = BufferValue.deserialize(ByteBuffer.wrap(keyValue.value));
+                    final K key = keySerde.deserializer().deserialize(topic,
+                        PrefixedWindowKeySchemas.TimeFirstWindowKeySchema.extractStoreKeyBytes(keyValue.key.get()));
+                    minTimestamp = bufferValue.context().timestamp();
+
+                    final V value = valueSerde.deserializer().deserialize(topic, bufferValue.newValue());
+
+                    if (bufferValue.context().timestamp() < minTimestamp) {
+                        throw new IllegalStateException(
+                            "minTimestamp [" + minTimestamp + "] did not match the actual min timestamp [" +
+                                bufferValue.context().timestamp() + "]"
+                        );
+                    }
+
+                    callback.accept(new Eviction<>(key, value, bufferValue.context()));
+
+                    wrapped().remove(keyValue.key);
+                    numRecords--;
+                    bufferSize = bufferSize - computeRecordSize(keyValue.key, bufferValue);
+                }
+                if (numRecords == 0) {
+                    minTimestamp = Long.MAX_VALUE;
+                } else {
+                    minTimestamp = wrapped().observedStreamTime - gracePeriod + 1;

Review Comment:
   I am sorry but I cannot follow. Where is `minTimestamp` used?
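   For context, `TimeOrderedKeyValueBuffer` implementations typically expose this field through a `minTimestamp()` accessor so callers can reason about what is still buffered. A minimal sketch of that pattern, assuming such an accessor exists on this class:
   ```java
   // Hypothetical accessor: expose the tracked minimum buffered timestamp.
   @Override
   public long minTimestamp() {
       return minTimestamp;
   }
   ```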



##########
streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java:
##########
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.processor.ProcessorContext;
+import org.apache.kafka.streams.processor.StateStore;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.processor.api.Record;
+import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;
+import org.apache.kafka.streams.processor.internals.SerdeGetter;
+import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.ValueAndTimestamp;
+
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static java.util.Objects.requireNonNull;
+
+public class RocksDBTimeOrderedKeyValueBuffer<K, V> extends WrappedStateStore<RocksDBTimeOrderedKeyValueSegmentedBytesStore, Object, Object> implements TimeOrderedKeyValueBuffer<K, V, V> {
+
+    private final long gracePeriod;
+    private long bufferSize;
+    private long minTimestamp;
+    private int numRecords;
+    private Serde<K> keySerde;
+    private Serde<V> valueSerde;
+    private final String topic;
+    private int seqnum;
+
+    public RocksDBTimeOrderedKeyValueBuffer(final RocksDBTimeOrderedKeyValueSegmentedBytesStore store,
+                                            final Duration gracePeriod,
+                                            final String topic) {
+        super(store);
+        this.gracePeriod = gracePeriod.toMillis();
+        minTimestamp = Long.MAX_VALUE;
+        numRecords = 0;
+        bufferSize = 0;
+        seqnum = 0;
+        this.topic = topic;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void setSerdesIfNull(final SerdeGetter getter) {
+        keySerde = keySerde == null ? (Serde<K>) getter.keySerde() : keySerde;
+        valueSerde = valueSerde == null ? getter.valueSerde() : valueSerde;
+    }
+
+    @Deprecated
+    @Override
+    public void init(final ProcessorContext context, final StateStore root) {
+        wrapped().init(context, wrapped());

Review Comment:
   I think that is a good thought. I do not know the reason why we pass the 
root through the constructor and through the `init()` method. 
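   For comparison, the usual pattern in other `WrappedStateStore` subclasses is to forward the `root` parameter received by `init()` rather than `wrapped()`, so that the outermost store is what gets registered with the context. A minimal sketch of that convention (an assumption about intent, not this PR's code):
   ```java
   @Override
   public void init(final StateStoreContext context, final StateStore root) {
       // Forward the root handed in by the caller so that layers such as
       // changelogging and metrics see the outermost store.
       wrapped().init(context, root);
   }
   ```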



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBufferTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import org.apache.kafka.common.header.internals.RecordHeaders;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.metrics.Sensor;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.MockTime;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.processor.TaskId;
+import org.apache.kafka.streams.processor.api.Record;
+import org.apache.kafka.streams.processor.internals.InternalProcessorContext;
+import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;
+import org.apache.kafka.streams.processor.internals.SerdeGetter;
+import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl;
+import org.apache.kafka.test.MockInternalNewProcessorContext;
+import org.apache.kafka.test.StreamsTestUtils;
+import org.apache.kafka.test.TestUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicInteger;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.when;
+
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class RocksDBTimeOrderedKeyValueBufferTest {
+    public RocksDBTimeOrderedKeyValueBuffer<String, String> buffer;
+    @Mock
+    public SerdeGetter serdeGetter;
+    public InternalProcessorContext<String, String> context;
+    public StreamsMetricsImpl streamsMetrics;
+    @Mock
+    public Sensor sensor;
+    public long offset;
+
+    @Before
+    public void setUp() {
+        when(serdeGetter.keySerde()).thenReturn(new Serdes.StringSerde());
+        when(serdeGetter.valueSerde()).thenReturn(new Serdes.StringSerde());
+        final Metrics metrics = new Metrics();
+        offset = 0;
+        streamsMetrics = new StreamsMetricsImpl(metrics, "test-client", StreamsConfig.METRICS_LATEST, new MockTime());
+        context = new MockInternalNewProcessorContext<>(StreamsTestUtils.getStreamsConfig(), new TaskId(0, 0), TestUtils.tempDirectory());
+    }
+
+    public void createJoin(final Duration grace) {
+        final RocksDBTimeOrderedKeyValueBytesStore store = new RocksDbTimeOrderedKeyValueBytesStoreSupplier("testing", 100).get();
+        buffer = new RocksDBTimeOrderedKeyValueBuffer<>(store, grace, "testing");
+        buffer.setSerdesIfNull(serdeGetter);
+        store.init((StateStoreContext) context, store);
+        buffer.init((StateStoreContext) context, store);
+    }
+
+    private void pipeRecord(final String key, final String value, final long time) {
+        final Record<String, String> record = new Record<>(key, value, time);
+        context.setRecordContext(new ProcessorRecordContext(time, offset++, 0, "testing", new RecordHeaders()));
+        buffer.put(time, record, context.recordContext());
+    }
+
+    @Test
+    public void shouldAddAndEvictRecord() {
+        createJoin(Duration.ZERO);
+        final AtomicInteger count = new AtomicInteger(0);
+        pipeRecord("1", "0", 0L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(1));
+    }
+
+    @Test
+    public void shouldAddAndEvictRecordTwice() {
+        createJoin(Duration.ZERO);
+        final AtomicInteger count = new AtomicInteger(0);
+        pipeRecord("1", "0", 0L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(1));
+        pipeRecord("2", "0", 1L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(2));
+    }
+
+    @Test
+    public void shouldAddAndEvictRecordTwiceWithNonZeroGrace() {
+        createJoin(Duration.ofMillis(1));
+        final AtomicInteger count = new AtomicInteger(0);
+        pipeRecord("1", "0", 0L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(0));
+        pipeRecord("2", "0", 1L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(1));
+    }
+
+    @Test
+    public void shouldAddRecordsTwiceAndEvictRecordsOnce() {
+        createJoin(Duration.ZERO);
+        final AtomicInteger count = new AtomicInteger(0);
+        pipeRecord("1", "0", 0L);
+        buffer.evictWhile(() -> buffer.numRecords() > 1, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(0));
+        pipeRecord("2", "0", 1L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(2));
+    }
+
+    @Test
+    public void shouldDropLateRecords() {
+        createJoin(Duration.ZERO);
+        final AtomicInteger count = new AtomicInteger(0);
+        pipeRecord("1", "0", 1L);
+        buffer.evictWhile(() -> buffer.numRecords() > 1, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(0));
+        pipeRecord("2", "0", 0L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(1));
+    }
+
+    @Test
+    public void shouldDropLateRecordsWithNonZeroGrace() {
+        createJoin(Duration.ofMillis(1));
+        final AtomicInteger count = new AtomicInteger(0);
+        pipeRecord("1", "0", 2L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(0));
+        pipeRecord("2", "0", 1L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(1));
+        pipeRecord("2", "0", 0L);
+        buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement());
+        assertThat(count.get(), equalTo(1));
+    }
+
+    @Test
+    public void shouldHandleCollidingKeys() {

Review Comment:
   Oh, yes! You are right!
   That could interact with the seqnum issue we have: the number of records can differ from the number of keys in the state store.
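   For illustration, one common way to keep colliding keys distinct in the store is to append a monotonically increasing sequence number to the serialized key. A hypothetical sketch (helper name and byte layout assumed, not this PR's serialization):
   ```java
   import java.nio.ByteBuffer;

   // Hypothetical helper: suffix the serialized key with a per-buffer seqnum so
   // two records with the same key and timestamp map to distinct store keys.
   static byte[] withSeqnum(final byte[] serializedKey, final int seqnum) {
       return ByteBuffer.allocate(serializedKey.length + Integer.BYTES)
           .put(serializedKey)
           .putInt(seqnum)
           .array();
   }
   ```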



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
