mjsax commented on code in PR #13143:
URL: https://github.com/apache/kafka/pull/13143#discussion_r1093764504


##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.common.utils.Utils;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.errors.InvalidStateStoreException;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.StreamsTestUtils;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentTest {
+
+    private static final String STORE_NAME = "physical-rocks";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+    private static final Serializer<String> STRING_SERIALIZER = new StringSerializer();
+    private static final Deserializer<String> STRING_DESERIALIZER = new StringDeserializer();
+
+    private RocksDBStore physicalStore;
+
+    private LogicalKeyValueSegment segment1;
+    private LogicalKeyValueSegment segment2;
+
+    @Before
+    public void setUp() {
+        physicalStore = new RocksDBStore(STORE_NAME, DB_FILE_DIR, new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME), false);
+        physicalStore.init((StateStoreContext) new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.String(),
+            new StreamsConfig(StreamsTestUtils.getStreamsConfig())
+        ), physicalStore);
+
+        segment1 = new LogicalKeyValueSegment(1, "segment-1", physicalStore);
+        segment2 = new LogicalKeyValueSegment(2, "segment-2", physicalStore);
+    }
+
+    @After
+    public void tearDown() {
+        segment1.close();
+        segment2.close();
+        physicalStore.close();
+    }
+
+    @Test
+    public void shouldPut() {
+        final KeyValue<String, String> kv0 = new KeyValue<>("1", "a");
+        final KeyValue<String, String> kv1 = new KeyValue<>("2", "b");
+
+        segment1.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));

Review Comment:
   Why use `getBytes` here and the `StringSerializer` below?
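
   For consistency, the raw `getBytes` calls could go through the same serializer
   as below, e.g. (just a sketch; the helper is hypothetical):

   ```java
   // hypothetical helper so keys are always built the same way;
   // StringSerializer defaults to UTF-8, so the bytes are identical either way
   private static Bytes serializeKey(final String key) {
       return new Bytes(STRING_SERIALIZER.serialize(null, key));
   }

   // usage:
   segment1.put(serializeKey(kv0.key), STRING_SERIALIZER.serialize(null, kv0.value));
   ```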



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.common.utils.Utils;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.errors.InvalidStateStoreException;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.StreamsTestUtils;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentTest {
+
+    private static final String STORE_NAME = "physical-rocks";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+    private static final Serializer<String> STRING_SERIALIZER = new StringSerializer();
+    private static final Deserializer<String> STRING_DESERIALIZER = new StringDeserializer();
+
+    private RocksDBStore physicalStore;
+
+    private LogicalKeyValueSegment segment1;
+    private LogicalKeyValueSegment segment2;
+
+    @Before
+    public void setUp() {
+        physicalStore = new RocksDBStore(STORE_NAME, DB_FILE_DIR, new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME), false);
+        physicalStore.init((StateStoreContext) new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.String(),
+            new StreamsConfig(StreamsTestUtils.getStreamsConfig())
+        ), physicalStore);
+
+        segment1 = new LogicalKeyValueSegment(1, "segment-1", physicalStore);
+        segment2 = new LogicalKeyValueSegment(2, "segment-2", physicalStore);
+    }
+
+    @After
+    public void tearDown() {
+        segment1.close();
+        segment2.close();
+        physicalStore.close();
+    }
+
+    @Test
+    public void shouldPut() {
+        final KeyValue<String, String> kv0 = new KeyValue<>("1", "a");
+        final KeyValue<String, String> kv1 = new KeyValue<>("2", "b");
+
+        segment1.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment1.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+
+        assertEquals("a", getAndDeserialize(segment1, "1"));

Review Comment:
   Should we also `get` on the physical store to see if the logic works as 
expected? (Also for other tests)
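
   A rough sketch of such a check (it assumes only that both logical segments
   share the single `physicalStore`, not any particular key-prefixing layout):

   ```java
   // after the four puts above, the shared physical store should hold
   // one entry per (segment, key) pair, i.e. four entries in total
   try (final KeyValueIterator<Bytes, byte[]> iterator = physicalStore.all()) {
       int numEntries = 0;
       while (iterator.hasNext()) {
           iterator.next();
           numEntries++;
       }
       assertEquals(4, numEntries);
   }
   ```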



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentsTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.streams.processor.internals.MockStreamsMetrics;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.MockRecordCollector;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentsTest {
+
+    private static final long SEGMENT_INTERVAL = 100L;
+    private static final long RETENTION_PERIOD = 4 * SEGMENT_INTERVAL;
+    private static final String STORE_NAME = "logical-segments";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+
+    private InternalMockProcessorContext context;
+
+    private LogicalKeyValueSegments segments;
+
+    @Before
+    public void setUp() {
+        context = new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.Long(),
+            new MockRecordCollector(),
+            new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
+        );
+        segments = new LogicalKeyValueSegments(
+            STORE_NAME,
+            DB_FILE_DIR,
+            RETENTION_PERIOD,
+            SEGMENT_INTERVAL,
+            new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME)
+        );
+        segments.openExisting(context, -1L);
+    }
+
+    @After
+    public void tearDown() {
+        segments.close();
+    }
+
+    @Test
+    public void shouldGetSegmentIdsFromTimestamp() {
+        assertEquals(0, segments.segmentId(0));
+        assertEquals(1, segments.segmentId(SEGMENT_INTERVAL));
+        assertEquals(2, segments.segmentId(2 * SEGMENT_INTERVAL));
+        assertEquals(3, segments.segmentId(3 * SEGMENT_INTERVAL));
+    }
+
+    @Test
+    public void shouldCreateSegments() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(1, context, -1L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(2, context, -1L);
+
+        final File rocksdbDir = new File(new File(context.stateDir(), DB_FILE_DIR), STORE_NAME);
+        assertTrue(rocksdbDir.isDirectory());
+
+        assertTrue(segment1.isOpen());
+        assertTrue(segment2.isOpen());
+        assertTrue(segment3.isOpen());
+    }
+
+    @Test
+    public void shouldNotCreateSegmentThatIsAlreadyExpired() {
+        final long streamTime = updateStreamTimeAndCreateSegment(7);
+        assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
+    }
+
+    @Test
+    public void shouldCleanupSegmentsThatHaveExpired() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, 0);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(0, context, SEGMENT_INTERVAL * 2L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(3, context, SEGMENT_INTERVAL * 3L);
+        final LogicalKeyValueSegment segment4 = segments.getOrCreateSegmentIfLive(7, context, SEGMENT_INTERVAL * 7L);
+
+        final List<LogicalKeyValueSegment> allSegments = segments.allSegments(true);
+        assertEquals(2, allSegments.size());
+        assertEquals(segment3, allSegments.get(0));
+        assertEquals(segment4, allSegments.get(1));
+    }
+
+    @Test
+    public void shouldGetSegmentForTimestamp() {
+        final LogicalKeyValueSegment segment = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        segments.getOrCreateSegmentIfLive(1, context, -1L);

Review Comment:
   Why are we "re-creating" the segment? If we want to test "forTimestamp" 
should we test upper and lower bound of the segment?
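
   A sketch of what bound checks could look like (assuming segment 1 covers
   the timestamps `[SEGMENT_INTERVAL, 2 * SEGMENT_INTERVAL)`):

   ```java
   final LogicalKeyValueSegment segment = segments.getOrCreateSegmentIfLive(1, context, -1L);
   // lower bound of segment 1
   assertEquals(segment, segments.getSegmentForTimestamp(SEGMENT_INTERVAL));
   // upper bound of segment 1, i.e. the last timestamp before segment 2 starts
   assertEquals(segment, segments.getSegmentForTimestamp(2 * SEGMENT_INTERVAL - 1));
   ```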



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java:
##########
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.common.utils.Utils;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.errors.InvalidStateStoreException;
+import org.apache.kafka.streams.processor.StateStoreContext;
+import org.apache.kafka.streams.state.KeyValueIterator;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.StreamsTestUtils;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentTest {
+
+    private static final String STORE_NAME = "physical-rocks";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+    private static final Serializer<String> STRING_SERIALIZER = new StringSerializer();
+    private static final Deserializer<String> STRING_DESERIALIZER = new StringDeserializer();
+
+    private RocksDBStore physicalStore;
+
+    private LogicalKeyValueSegment segment1;
+    private LogicalKeyValueSegment segment2;
+
+    @Before
+    public void setUp() {
+        physicalStore = new RocksDBStore(STORE_NAME, DB_FILE_DIR, new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME), false);
+        physicalStore.init((StateStoreContext) new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.String(),
+            new StreamsConfig(StreamsTestUtils.getStreamsConfig())
+        ), physicalStore);
+
+        segment1 = new LogicalKeyValueSegment(1, "segment-1", physicalStore);
+        segment2 = new LogicalKeyValueSegment(2, "segment-2", physicalStore);
+    }
+
+    @After
+    public void tearDown() {
+        segment1.close();
+        segment2.close();
+        physicalStore.close();
+    }
+
+    @Test
+    public void shouldPut() {
+        final KeyValue<String, String> kv0 = new KeyValue<>("1", "a");
+        final KeyValue<String, String> kv1 = new KeyValue<>("2", "b");
+
+        segment1.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment1.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+
+        assertEquals("a", getAndDeserialize(segment1, "1"));
+        assertEquals("b", getAndDeserialize(segment1, "2"));
+        assertEquals("a", getAndDeserialize(segment2, "1"));
+        assertEquals("b", getAndDeserialize(segment2, "2"));
+    }
+
+    @Test
+    public void shouldPutAll() {
+        final List<KeyValue<Bytes, byte[]>> entries = new ArrayList<>();
+        entries.add(new KeyValue<>(
+            new Bytes(STRING_SERIALIZER.serialize(null, "1")),
+            STRING_SERIALIZER.serialize(null, "a")));
+        entries.add(new KeyValue<>(
+            new Bytes(STRING_SERIALIZER.serialize(null, "2")),
+            STRING_SERIALIZER.serialize(null, "b")));
+        entries.add(new KeyValue<>(
+            new Bytes(STRING_SERIALIZER.serialize(null, "3")),
+            STRING_SERIALIZER.serialize(null, "c")));
+
+        segment1.putAll(entries);
+        segment2.putAll(entries);
+
+        assertEquals("a", getAndDeserialize(segment1, "1"));
+        assertEquals("b", getAndDeserialize(segment1, "2"));
+        assertEquals("c", getAndDeserialize(segment1, "3"));
+        assertEquals("a", getAndDeserialize(segment2, "1"));
+        assertEquals("b", getAndDeserialize(segment2, "2"));
+        assertEquals("c", getAndDeserialize(segment2, "3"));
+    }
+
+    @Test
+    public void shouldPutIfAbsent() {
+        final Bytes keyBytes = new Bytes(STRING_SERIALIZER.serialize(null, "one"));
+        final byte[] valueBytes = STRING_SERIALIZER.serialize(null, "A");
+        final byte[] valueBytesUpdate = STRING_SERIALIZER.serialize(null, "B");
+
+        segment1.putIfAbsent(keyBytes, valueBytes);
+        segment1.putIfAbsent(keyBytes, valueBytesUpdate);
+        segment2.putIfAbsent(keyBytes, valueBytesUpdate);
+
+        assertEquals("A", STRING_DESERIALIZER.deserialize(null, 
segment1.get(keyBytes)));
+        assertEquals("B", STRING_DESERIALIZER.deserialize(null, 
segment2.get(keyBytes)));
+    }
+
+    @Test
+    public void shouldDelete() {
+        final KeyValue<String, String> kv0 = new KeyValue<>("1", "a");
+        final KeyValue<String, String> kv1 = new KeyValue<>("2", "b");
+
+        segment1.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment1.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+        segment1.delete(new Bytes(kv0.key.getBytes(UTF_8)));
+
+        assertNull(segment1.get(new Bytes(kv0.key.getBytes(UTF_8))));
+        assertEquals("b", getAndDeserialize(segment1, "2"));
+        assertEquals("a", getAndDeserialize(segment2, "1"));
+        assertEquals("b", getAndDeserialize(segment2, "2"));
+    }
+
+    @Test
+    public void shouldReturnValuesOnRange() {
+        final KeyValue<String, String> kv0 = new KeyValue<>("0", "zero");
+        final KeyValue<String, String> kv1 = new KeyValue<>("1", "one");
+        final KeyValue<String, String> kv2 = new KeyValue<>("2", "two");
+        final KeyValue<String, String> kvOther = new KeyValue<>("1", "other");
+
+        segment1.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
+        segment1.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
+        segment1.put(new Bytes(kv2.key.getBytes(UTF_8)), kv2.value.getBytes(UTF_8));
+        segment2.put(new Bytes(kvOther.key.getBytes(UTF_8)), kvOther.value.getBytes(UTF_8));
+
+        final LinkedList<KeyValue<String, String>> expectedContents = new LinkedList<>();
+        expectedContents.add(kv0);
+        expectedContents.add(kv1);
+
+        try (final KeyValueIterator<Bytes, byte[]> iterator = segment1.range(null, new Bytes(STRING_SERIALIZER.serialize(null, "1")))) {

Review Comment:
   Should we test different ranges? All lower and upper bound null/not-null 
combination?
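
   E.g. a sketch of the four combinations, reusing the keys from this test
   (assuming `range` accepts `null` as an open bound, as the other key-value
   stores do):

   ```java
   final Bytes from = new Bytes(STRING_SERIALIZER.serialize(null, "1"));
   final Bytes to = new Bytes(STRING_SERIALIZER.serialize(null, "2"));

   try (final KeyValueIterator<Bytes, byte[]> it = segment1.range(null, null)) { /* all keys */ }
   try (final KeyValueIterator<Bytes, byte[]> it = segment1.range(from, null)) { /* keys >= "1" */ }
   try (final KeyValueIterator<Bytes, byte[]> it = segment1.range(null, to))   { /* keys <= "2" */ }
   try (final KeyValueIterator<Bytes, byte[]> it = segment1.range(from, to))   { /* "1" <= keys <= "2" */ }
   ```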



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentsTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.streams.processor.internals.MockStreamsMetrics;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.MockRecordCollector;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentsTest {
+
+    private static final long SEGMENT_INTERVAL = 100L;
+    private static final long RETENTION_PERIOD = 4 * SEGMENT_INTERVAL;
+    private static final String STORE_NAME = "logical-segments";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+
+    private InternalMockProcessorContext context;
+
+    private LogicalKeyValueSegments segments;
+
+    @Before
+    public void setUp() {
+        context = new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.Long(),
+            new MockRecordCollector(),
+            new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
+        );
+        segments = new LogicalKeyValueSegments(
+            STORE_NAME,
+            DB_FILE_DIR,
+            RETENTION_PERIOD,
+            SEGMENT_INTERVAL,
+            new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME)
+        );
+        segments.openExisting(context, -1L);
+    }
+
+    @After
+    public void tearDown() {
+        segments.close();
+    }
+
+    @Test
+    public void shouldGetSegmentIdsFromTimestamp() {
+        assertEquals(0, segments.segmentId(0));
+        assertEquals(1, segments.segmentId(SEGMENT_INTERVAL));
+        assertEquals(2, segments.segmentId(2 * SEGMENT_INTERVAL));
+        assertEquals(3, segments.segmentId(3 * SEGMENT_INTERVAL));
+    }
+
+    @Test
+    public void shouldCreateSegments() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(1, context, -1L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(2, context, -1L);
+
+        final File rocksdbDir = new File(new File(context.stateDir(), DB_FILE_DIR), STORE_NAME);
+        assertTrue(rocksdbDir.isDirectory());
+
+        assertTrue(segment1.isOpen());
+        assertTrue(segment2.isOpen());
+        assertTrue(segment3.isOpen());
+    }
+
+    @Test
+    public void shouldNotCreateSegmentThatIsAlreadyExpired() {
+        final long streamTime = updateStreamTimeAndCreateSegment(7);
+        assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
+    }
+
+    @Test
+    public void shouldCleanupSegmentsThatHaveExpired() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, 0);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(0, context, SEGMENT_INTERVAL * 2L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(3, context, SEGMENT_INTERVAL * 3L);
+        final LogicalKeyValueSegment segment4 = segments.getOrCreateSegmentIfLive(7, context, SEGMENT_INTERVAL * 7L);
+
+        final List<LogicalKeyValueSegment> allSegments = segments.allSegments(true);
+        assertEquals(2, allSegments.size());
+        assertEquals(segment3, allSegments.get(0));
+        assertEquals(segment4, allSegments.get(1));
+    }
+
+    @Test
+    public void shouldGetSegmentForTimestamp() {

Review Comment:
   As above



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentsTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.streams.processor.internals.MockStreamsMetrics;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.MockRecordCollector;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentsTest {
+
+    private static final long SEGMENT_INTERVAL = 100L;
+    private static final long RETENTION_PERIOD = 4 * SEGMENT_INTERVAL;
+    private static final String STORE_NAME = "logical-segments";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+
+    private InternalMockProcessorContext context;
+
+    private LogicalKeyValueSegments segments;
+
+    @Before
+    public void setUp() {
+        context = new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.Long(),
+            new MockRecordCollector(),
+            new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
+        );
+        segments = new LogicalKeyValueSegments(
+            STORE_NAME,
+            DB_FILE_DIR,
+            RETENTION_PERIOD,
+            SEGMENT_INTERVAL,
+            new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME)
+        );
+        segments.openExisting(context, -1L);
+    }
+
+    @After
+    public void tearDown() {
+        segments.close();
+    }
+
+    @Test
+    public void shouldGetSegmentIdsFromTimestamp() {
+        assertEquals(0, segments.segmentId(0));
+        assertEquals(1, segments.segmentId(SEGMENT_INTERVAL));
+        assertEquals(2, segments.segmentId(2 * SEGMENT_INTERVAL));
+        assertEquals(3, segments.segmentId(3 * SEGMENT_INTERVAL));
+    }
+
+    @Test
+    public void shouldCreateSegments() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(1, context, -1L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(2, context, -1L);
+
+        final File rocksdbDir = new File(new File(context.stateDir(), DB_FILE_DIR), STORE_NAME);
+        assertTrue(rocksdbDir.isDirectory());
+
+        assertTrue(segment1.isOpen());
+        assertTrue(segment2.isOpen());
+        assertTrue(segment3.isOpen());
+    }
+
+    @Test
+    public void shouldNotCreateSegmentThatIsAlreadyExpired() {
+        final long streamTime = updateStreamTimeAndCreateSegment(7);
+        assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
+    }
+
+    @Test
+    public void shouldCleanupSegmentsThatHaveExpired() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, 0);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(0, context, SEGMENT_INTERVAL * 2L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(3, context, SEGMENT_INTERVAL * 3L);
+        final LogicalKeyValueSegment segment4 = segments.getOrCreateSegmentIfLive(7, context, SEGMENT_INTERVAL * 7L);
+
+        final List<LogicalKeyValueSegment> allSegments = segments.allSegments(true);
+        assertEquals(2, allSegments.size());
+        assertEquals(segment3, allSegments.get(0));
+        assertEquals(segment4, allSegments.get(1));
+    }
+
+    @Test
+    public void shouldGetSegmentForTimestamp() {
+        final LogicalKeyValueSegment segment = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        segments.getOrCreateSegmentIfLive(1, context, -1L);
+        assertEquals(segment, segments.getSegmentForTimestamp(0L));
+    }
+
+    @Test
+    public void shouldGetSegmentsWithinTimeRange() {

Review Comment:
   As above



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentsTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.streams.processor.internals.MockStreamsMetrics;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.MockRecordCollector;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentsTest {
+
+    private static final long SEGMENT_INTERVAL = 100L;
+    private static final long RETENTION_PERIOD = 4 * SEGMENT_INTERVAL;
+    private static final String STORE_NAME = "logical-segments";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+
+    private InternalMockProcessorContext context;
+
+    private LogicalKeyValueSegments segments;
+
+    @Before
+    public void setUp() {
+        context = new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.Long(),
+            new MockRecordCollector(),
+            new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
+        );
+        segments = new LogicalKeyValueSegments(
+            STORE_NAME,
+            DB_FILE_DIR,
+            RETENTION_PERIOD,
+            SEGMENT_INTERVAL,
+            new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME)
+        );
+        segments.openExisting(context, -1L);
+    }
+
+    @After
+    public void tearDown() {
+        segments.close();
+    }
+
+    @Test
+    public void shouldGetSegmentIdsFromTimestamp() {
+        assertEquals(0, segments.segmentId(0));
+        assertEquals(1, segments.segmentId(SEGMENT_INTERVAL));
+        assertEquals(2, segments.segmentId(2 * SEGMENT_INTERVAL));
+        assertEquals(3, segments.segmentId(3 * SEGMENT_INTERVAL));
+    }
+
+    @Test
+    public void shouldCreateSegments() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(1, context, -1L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(2, context, -1L);
+
+        final File rocksdbDir = new File(new File(context.stateDir(), DB_FILE_DIR), STORE_NAME);
+        assertTrue(rocksdbDir.isDirectory());
+
+        assertTrue(segment1.isOpen());
+        assertTrue(segment2.isOpen());
+        assertTrue(segment3.isOpen());
+    }
+
+    @Test
+    public void shouldNotCreateSegmentThatIsAlreadyExpired() {
+        final long streamTime = updateStreamTimeAndCreateSegment(7);
+        assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
+    }
+
+    @Test
+    public void shouldCleanupSegmentsThatHaveExpired() {

Review Comment:
   Sounds like we test `AbstractSegments` logic here -- do we need to do this?
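
   If we keep it, maybe also assert on the part that is specific to the logical
   segments, e.g. (sketch, using the hamcrest matchers already imported here):

   ```java
   // an expired segment should no longer be resolvable by timestamp
   assertThat(segments.getSegmentForTimestamp(0L), is(nullValue()));
   ```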



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentsTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.streams.processor.internals.MockStreamsMetrics;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.MockRecordCollector;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentsTest {
+
+    private static final long SEGMENT_INTERVAL = 100L;
+    private static final long RETENTION_PERIOD = 4 * SEGMENT_INTERVAL;
+    private static final String STORE_NAME = "logical-segments";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+
+    private InternalMockProcessorContext context;
+
+    private LogicalKeyValueSegments segments;
+
+    @Before
+    public void setUp() {
+        context = new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.Long(),
+            new MockRecordCollector(),
+            new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
+        );
+        segments = new LogicalKeyValueSegments(
+            STORE_NAME,
+            DB_FILE_DIR,
+            RETENTION_PERIOD,
+            SEGMENT_INTERVAL,
+            new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME)
+        );
+        segments.openExisting(context, -1L);
+    }
+
+    @After
+    public void tearDown() {
+        segments.close();
+    }
+
+    @Test
+    public void shouldGetSegmentIdsFromTimestamp() {
+        assertEquals(0, segments.segmentId(0));
+        assertEquals(1, segments.segmentId(SEGMENT_INTERVAL));
+        assertEquals(2, segments.segmentId(2 * SEGMENT_INTERVAL));
+        assertEquals(3, segments.segmentId(3 * SEGMENT_INTERVAL));
+    }
+
+    @Test
+    public void shouldCreateSegments() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(1, context, -1L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(2, context, -1L);
+
+        final File rocksdbDir = new File(new File(context.stateDir(), DB_FILE_DIR), STORE_NAME);
+        assertTrue(rocksdbDir.isDirectory());
+
+        assertTrue(segment1.isOpen());
+        assertTrue(segment2.isOpen());
+        assertTrue(segment3.isOpen());
+    }
+
+    @Test
+    public void shouldNotCreateSegmentThatIsAlreadyExpired() {
+        final long streamTime = updateStreamTimeAndCreateSegment(7);
+        assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
+    }
+
+    @Test
+    public void shouldCleanupSegmentsThatHaveExpired() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, 0);
+        final LogicalKeyValueSegment segment2 = segments.getOrCreateSegmentIfLive(0, context, SEGMENT_INTERVAL * 2L);
+        final LogicalKeyValueSegment segment3 = segments.getOrCreateSegmentIfLive(3, context, SEGMENT_INTERVAL * 3L);
+        final LogicalKeyValueSegment segment4 = segments.getOrCreateSegmentIfLive(7, context, SEGMENT_INTERVAL * 7L);
+
+        final List<LogicalKeyValueSegment> allSegments = segments.allSegments(true);
+        assertEquals(2, allSegments.size());
+        assertEquals(segment3, allSegments.get(0));
+        assertEquals(segment4, allSegments.get(1));
+    }
+
+    @Test
+    public void shouldGetSegmentForTimestamp() {
+        final LogicalKeyValueSegment segment = segments.getOrCreateSegmentIfLive(0, context, -1L);

Review Comment:
   nit: `-1L` does not sound like a valid `streamTime` to be passed in?
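
   E.g. (sketch) this would read cleaner with a real, non-negative stream time:

   ```java
   final LogicalKeyValueSegment segment = segments.getOrCreateSegmentIfLive(0, context, 0L);
   ```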



##########
streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentsTest.java:
##########
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.state.internals;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.List;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.streams.processor.internals.MockStreamsMetrics;
+import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder;
+import org.apache.kafka.test.InternalMockProcessorContext;
+import org.apache.kafka.test.MockRecordCollector;
+import org.apache.kafka.test.TestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogicalKeyValueSegmentsTest {
+
+    private static final long SEGMENT_INTERVAL = 100L;
+    private static final long RETENTION_PERIOD = 4 * SEGMENT_INTERVAL;
+    private static final String STORE_NAME = "logical-segments";
+    private static final String METRICS_SCOPE = "metrics-scope";
+    private static final String DB_FILE_DIR = "rocksdb";
+
+    private InternalMockProcessorContext context;
+
+    private LogicalKeyValueSegments segments;
+
+    @Before
+    public void setUp() {
+        context = new InternalMockProcessorContext<>(
+            TestUtils.tempDirectory(),
+            Serdes.String(),
+            Serdes.Long(),
+            new MockRecordCollector(),
+            new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
+        );
+        segments = new LogicalKeyValueSegments(
+            STORE_NAME,
+            DB_FILE_DIR,
+            RETENTION_PERIOD,
+            SEGMENT_INTERVAL,
+            new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME)
+        );
+        segments.openExisting(context, -1L);
+    }
+
+    @After
+    public void tearDown() {
+        segments.close();
+    }
+
+    @Test
+    public void shouldGetSegmentIdsFromTimestamp() {
+        assertEquals(0, segments.segmentId(0));
+        assertEquals(1, segments.segmentId(SEGMENT_INTERVAL));
+        assertEquals(2, segments.segmentId(2 * SEGMENT_INTERVAL));
+        assertEquals(3, segments.segmentId(3 * SEGMENT_INTERVAL));
+    }
+
+    @Test
+    public void shouldCreateSegments() {
+        final LogicalKeyValueSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);

Review Comment:
   Should we call `getOrCreateSegment` instead? Otherwise we mainly test the 
logic of `AbstractSegments`?
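
   I.e. something like (a sketch; `getOrCreateSegment` is the variant without
   the liveness check):

   ```java
   final LogicalKeyValueSegment segment1 = segments.getOrCreateSegment(0, context);
   final LogicalKeyValueSegment segment2 = segments.getOrCreateSegment(1, context);
   final LogicalKeyValueSegment segment3 = segments.getOrCreateSegment(2, context);
   ```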



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

