jsancio commented on code in PR #12655:
URL: https://github.com/apache/kafka/pull/12655#discussion_r973504381


##########
core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala:
##########
@@ -869,6 +869,56 @@ final class KafkaMetadataLogTest {
       })
     })
   }
+
+  @Test
+  def testSegmentsLessThanLatestSnapshot(): Unit = {
+    val config = DefaultMetadataLogConfig.copy(
+      logSegmentBytes = 10240,
+      logSegmentMinBytes = 10240,
+      logSegmentMillis = 10 * 1000,
+      retentionMaxBytes = 10240,
+      retentionMillis = 60 * 1000,
+      maxBatchSizeInBytes = 200
+    )
+    val log = buildMetadataLog(tempDir, mockTime, config)
+
+    // Generate enough data to cause a segment roll
+    for (_ <- 0 to 2000) {
+      append(log, 10, 1)
+    }
+    log.updateHighWatermark(new LogOffsetMetadata(log.endOffset.offset))
+
+    // The cleanup code requires that there are at least two snapshots
+    // Generate the first snapshot, which includes the first segment, by using the base offset of the second segment
+    val snapshotId1 = new OffsetAndEpoch(
+      log.log.logSegments.drop(1).head.baseOffset,
+      1
+    )
+    TestUtils.resource(log.storeSnapshot(snapshotId1).get()) { snapshot =>
+      snapshot.freeze()
+    }
+    // Generate the second snapshot, which includes the second segment, by using the base offset of the third segment
+    val snapshotId2 = new OffsetAndEpoch(
+      log.log.logSegments.drop(2).head.baseOffset,
+      1
+    )
+    TestUtils.resource(log.storeSnapshot(snapshotId2).get()) { snapshot =>
+      snapshot.freeze()
+    }
+
+    // Sleep long enough to trigger a possible segment delete because of the default retention
+    val defaultLogRetentionMs = Defaults.RetentionMs * 2
+    mockTime.sleep(defaultLogRetentionMs)
+
+    assertTrue(log.maybeClean())
+    assertEquals(1, log.snapshotCount())
+    assertTrue(log.startOffset > 0, s"${log.startOffset} must be greater than 0")
+    val latestSnapshotOffset = log.latestSnapshotId().get.offset
+    assertTrue(
+      latestSnapshotOffset >= log.startOffset,
+      s"latest snapshot offset ($latestSnapshotOffset) must be >= log start 
offset (${log.startOffset})"
+    )

Review Comment:
   Without this change, this check fails with:
   ```
   > Task :core:test FAILED
   kafka.raft.KafkaMetadataLogTest.testSegmentLessThanLatestSnapshot() failed, log available in /home/jsancio/work/kafka/core/build/reports/testOutput/kafka.raft.KafkaMetadataLogTest.testSegmentLessThanLatestSnapshot().test.stdout

   KafkaMetadataLogTest > testSegmentNotDeleteWithoutSnapshot() FAILED
       org.opentest4j.AssertionFailedError: latest snapshot offset (1440) must be >= log start offset (20010) ==> expected: <true> but was: <false>
           at org.junit.jupiter.api.AssertionFailureBuilder.build(AssertionFailureBuilder.java:151)
           at org.junit.jupiter.api.AssertionFailureBuilder.buildAndThrow(AssertionFailureBuilder.java:132)
           at org.junit.jupiter.api.AssertTrue.failNotTrue(AssertTrue.java:63)
           at org.junit.jupiter.api.AssertTrue.assertTrue(AssertTrue.java:36)
           at org.junit.jupiter.api.Assertions.assertTrue(Assertions.java:210)
           at kafka.raft.KafkaMetadataLogTest.testSegmentLessThanLatestSnapshot(KafkaMetadataLogTest.scala:921)
   ```
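
   In other words, cleaning must never advance the log start offset past the latest snapshot offset, which is exactly what the new assertion checks. Below is a minimal sketch of that invariant, using the offsets from the failure above and hypothetical helper names (this is not the actual `KafkaMetadataLog` cleanup code):
   ```scala
   object SnapshotRetentionSketch {
     // Hypothetical helper: clamp the retention-driven deletion point so the new
     // log start offset can never pass the latest snapshot's offset.
     def safeNewStartOffset(retentionTargetOffset: Long, latestSnapshotOffset: Long): Long =
       math.min(retentionTargetOffset, latestSnapshotOffset)

     def main(args: Array[String]): Unit = {
       // Offsets from the assertion failure: retention would move the start offset
       // to 20010, but the latest snapshot only covers up to 1440, so cleanup has
       // to stop at 1440 to keep latestSnapshotOffset >= log start offset.
       assert(safeNewStartOffset(20010L, 1440L) == 1440L)
     }
   }
   ```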



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
