dengziming commented on code in PR #12084:
URL: https://github.com/apache/kafka/pull/12084#discussion_r883208081
##########
core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala:
##########
@@ -272,10 +278,77 @@ class DumpLogSegmentsTest {
log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord(null, buf.array)), leaderEpoch = 2)
log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE, records:_*), leaderEpoch = 2)
- output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--skip-record-metadata", "false", "--files", logFilePath))
- assert(output.contains("TOPIC_RECORD"))
- assert(output.contains("BROKER_RECORD"))
- assert(output.contains("skipping"))
+ output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--skip-record-metadata", "--files", logFilePath))
+ assertTrue(output.contains("TOPIC_RECORD"))
+ assertTrue(output.contains("BROKER_RECORD"))
+ assertTrue(output.contains("skipping"))
+ }
+
+ @Test
+ def testDumpMetadataSnapshot(): Unit = {
+ val metadataRecords = Seq(
+ new ApiMessageAndVersion(
+ new RegisterBrokerRecord().setBrokerId(0).setBrokerEpoch(10), 0.toShort),
+ new ApiMessageAndVersion(
+ new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(20), 0.toShort),
+ new ApiMessageAndVersion(
+ new TopicRecord().setName("test-topic").setTopicId(Uuid.randomUuid()), 0.toShort),
+ new ApiMessageAndVersion(
+ new PartitionChangeRecord().setTopicId(Uuid.randomUuid()).setLeader(1).
+ setPartitionId(0).setIsr(util.Arrays.asList(0, 1, 2)), 0.toShort)
+ )
+
+ val metadataLog = KafkaMetadataLog(
+ KafkaRaftServer.MetadataPartition,
+ KafkaRaftServer.MetadataTopicId,
+ logDir,
+ time,
+ time.scheduler,
+ MetadataLogConfig(
+ logSegmentBytes = 100 * 1024,
+ logSegmentMinBytes = 100 * 1024,
+ logSegmentMillis = 10 * 1000,
+ retentionMaxBytes = 100 * 1024,
+ retentionMillis = 60 * 1000,
+ maxBatchSizeInBytes = KafkaRaftClient.MAX_BATCH_SIZE_BYTES,
+ maxFetchSizeInBytes = KafkaRaftClient.MAX_FETCH_SIZE_BYTES,
+ fileDeleteDelayMs = Defaults.FileDeleteDelayMs,
+ nodeId = 1
+ )
+ )
+
+ val lastContainedLogTimestamp = 10000
+
+ TestUtils.resource(
+ RecordsSnapshotWriter.createWithHeader(
+ () => metadataLog.createNewSnapshot(new OffsetAndEpoch(0, 0)),
+ 1024,
+ MemoryPool.NONE,
+ new MockTime,
+ lastContainedLogTimestamp,
+ CompressionType.NONE,
+ new MetadataRecordSerde
+ ).get()
+ ) { snapshotWriter =>
+ snapshotWriter.append(metadataRecords.asJava)
+ snapshotWriter.freeze()
+ }
+
+ var output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--files", snapshotPath))
+ assertTrue(output.contains("Snapshot end offset: 0, epoch: 0"))
+ assertTrue(output.contains("TOPIC_RECORD"))
+ assertTrue(output.contains("BROKER_RECORD"))
+ assertTrue(output.contains("SnapshotHeader"))
+ assertTrue(output.contains("SnapshotFooter"))
+ assertTrue(output.contains(s"\"lastContainedLogTimestamp\":$lastContainedLogTimestamp"))
+
+ output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--skip-record-metadata", "--files", snapshotPath))
+ assertTrue(output.contains("Snapshot end offset: 0, epoch: 0"))
+ assertTrue(output.contains("TOPIC_RECORD"))
+ assertTrue(output.contains("BROKER_RECORD"))
+ assertFalse(output.contains("SnapshotHeader"))
+ assertFalse(output.contains("SnapshotFooter"))
+ assertFalse(output.contains(s"\"lastContainedLogTimestamp\":$lastContainedLogTimestamp"))
Review Comment:
👍, Done.