showuon commented on code in PR #16475:
URL: https://github.com/apache/kafka/pull/16475#discussion_r1666570830
##########
core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala:
##########
@@ -243,6 +246,216 @@ class DumpLogSegmentsTest {
assertEquals(Map.empty, errors.shallowOffsetNotFound)
}
+ def countSubstring(str: String, sub: String): Int =
+ str.sliding(sub.length).count(_ == sub)
+
+ // the number of batches in the log dump is equal to
+ // the number of occurrences of the "baseOffset:" substring
+ def batchCount(str: String): Int =
+ countSubstring(str, "baseOffset:")
+
+ // the number of records in the log dump is equal to
+ // the number of occurrences of the "payload:" substring
+ def recordCount(str: String): Int =
+ countSubstring(str, "payload:")
+
+ @Test
+ def testDumpRemoteLogMetadataEmpty(): Unit = {
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 0)
+ assertTrue(recordCount(output) == 0)
+ assertTrue(output.contains("Log starting offset: 0"))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataOneRecordOneBatch(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val records: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 1)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(output.contains(expectedDeletePayload))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataMultipleRecordsOneBatch(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+ val remoteSegmentId = Uuid.randomUuid
+
+ val topicIdPartition = new TopicIdPartition(topicId, new
TopicPartition(topicName, 0))
+ val remoteLogSegmentId = new RemoteLogSegmentId(topicIdPartition,
remoteSegmentId)
+
+ val metadata = Seq(new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId,
time.milliseconds,
+ Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(Array[Byte](0,
1, 2, 3))), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0),
+ new RemotePartitionDeleteMetadata(topicIdPartition,
RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0))
+
+ val metadataRecords: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val expectedUpdatePayload =
String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" +
+ "RemoteLogSegmentId{topicIdPartition=%s:%s-0, id=%s},
customMetadata=Optional[" +
+ "CustomMetadata{4 bytes}], state=COPY_SEGMENT_FINISHED,
eventTimestampMs=0, brokerId=0}", topicId, topicName, remoteSegmentId)
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 2)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(output.contains(expectedUpdatePayload))
+ assertTrue(output.contains(expectedDeletePayload))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataMultipleRecordsMultipleBatches(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+ val remoteSegmentId = Uuid.randomUuid
+
+ val topicIdPartition = new TopicIdPartition(topicId, new
TopicPartition(topicName, 0))
+ val remoteLogSegmentId = new RemoteLogSegmentId(topicIdPartition,
remoteSegmentId)
+
+ val metadata = Seq(
+ new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId, time.milliseconds,
+ Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(Array[Byte](0,
1, 2, 3))), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0),
+ new RemotePartitionDeleteMetadata(topicIdPartition,
RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0)
+ )
+
+ val records: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val expectedUpdatePayload =
String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" +
+ "RemoteLogSegmentId{topicIdPartition=%s:%s-0, id=%s},
customMetadata=Optional[" +
+ "CustomMetadata{4 bytes}], state=COPY_SEGMENT_FINISHED,
eventTimestampMs=0, brokerId=0}", topicId, topicName, remoteSegmentId)
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 2)
+ assertTrue(recordCount(output) == 4)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(countSubstring(output, expectedUpdatePayload) == 2)
+ assertTrue(countSubstring(output, expectedDeletePayload) == 2)
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataNonZeroStartingOffset(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val metadataRecords: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val memoryRecordsSizeInBytes = MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*).sizeInBytes()
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes =
memoryRecordsSizeInBytes)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val logPaths =
logDir.listFiles.filter(_.getName.endsWith(".log")).map(_.getAbsolutePath)
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logPaths(1)))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 1)
+ assertTrue(output.contains("Log starting offset: 1"))
+ assertTrue(output.contains(expectedDeletePayload))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataWithCorruption(): Unit = {
+ val metadataRecords = Array(new SimpleRecord(null, "corrupted".getBytes()))
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 1)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(output.contains("Could not deserialize metadata record"))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataIoException(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val metadataRecords: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ Files.setPosixFilePermissions(Paths.get(logFilePath),
PosixFilePermissions.fromString("-w-------"))
Review Comment:
Since this test changes the file's permissions to write-only, could we still successfully delete this file in the `afterEach` method? If not, please restore the permissions at the end of the test so cleanup succeeds.
##########
core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala:
##########
@@ -243,6 +246,216 @@ class DumpLogSegmentsTest {
assertEquals(Map.empty, errors.shallowOffsetNotFound)
}
+ def countSubstring(str: String, sub: String): Int =
+ str.sliding(sub.length).count(_ == sub)
+
+ // the number of batches in the log dump is equal to
+ // the number of occurrences of the "baseOffset:" substring
+ def batchCount(str: String): Int =
+ countSubstring(str, "baseOffset:")
+
+ // the number of records in the log dump is equal to
+ // the number of occurrences of the "payload:" substring
+ def recordCount(str: String): Int =
+ countSubstring(str, "payload:")
+
+ @Test
+ def testDumpRemoteLogMetadataEmpty(): Unit = {
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 0)
+ assertTrue(recordCount(output) == 0)
+ assertTrue(output.contains("Log starting offset: 0"))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataOneRecordOneBatch(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val records: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 1)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(output.contains(expectedDeletePayload))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataMultipleRecordsOneBatch(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+ val remoteSegmentId = Uuid.randomUuid
+
+ val topicIdPartition = new TopicIdPartition(topicId, new
TopicPartition(topicName, 0))
+ val remoteLogSegmentId = new RemoteLogSegmentId(topicIdPartition,
remoteSegmentId)
+
+ val metadata = Seq(new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId,
time.milliseconds,
+ Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(Array[Byte](0,
1, 2, 3))), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0),
+ new RemotePartitionDeleteMetadata(topicIdPartition,
RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0))
+
+ val metadataRecords: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val expectedUpdatePayload =
String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" +
+ "RemoteLogSegmentId{topicIdPartition=%s:%s-0, id=%s},
customMetadata=Optional[" +
+ "CustomMetadata{4 bytes}], state=COPY_SEGMENT_FINISHED,
eventTimestampMs=0, brokerId=0}", topicId, topicName, remoteSegmentId)
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 2)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(output.contains(expectedUpdatePayload))
+ assertTrue(output.contains(expectedDeletePayload))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataMultipleRecordsMultipleBatches(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+ val remoteSegmentId = Uuid.randomUuid
+
+ val topicIdPartition = new TopicIdPartition(topicId, new
TopicPartition(topicName, 0))
+ val remoteLogSegmentId = new RemoteLogSegmentId(topicIdPartition,
remoteSegmentId)
+
+ val metadata = Seq(
+ new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId, time.milliseconds,
+ Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(Array[Byte](0,
1, 2, 3))), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0),
+ new RemotePartitionDeleteMetadata(topicIdPartition,
RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0)
+ )
+
+ val records: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val expectedUpdatePayload =
String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" +
+ "RemoteLogSegmentId{topicIdPartition=%s:%s-0, id=%s},
customMetadata=Optional[" +
+ "CustomMetadata{4 bytes}], state=COPY_SEGMENT_FINISHED,
eventTimestampMs=0, brokerId=0}", topicId, topicName, remoteSegmentId)
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 2)
+ assertTrue(recordCount(output) == 4)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(countSubstring(output, expectedUpdatePayload) == 2)
+ assertTrue(countSubstring(output, expectedDeletePayload) == 2)
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataNonZeroStartingOffset(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val metadataRecords: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val memoryRecordsSizeInBytes = MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*).sizeInBytes()
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes =
memoryRecordsSizeInBytes)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val logPaths =
logDir.listFiles.filter(_.getName.endsWith(".log")).map(_.getAbsolutePath)
+ val expectedDeletePayload =
String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
+ "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}",
topicId, topicName)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logPaths(1)))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 1)
+ assertTrue(output.contains("Log starting offset: 1"))
+ assertTrue(output.contains(expectedDeletePayload))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataWithCorruption(): Unit = {
+ val metadataRecords = Array(new SimpleRecord(null, "corrupted".getBytes()))
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ val output = runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath))
+ assertTrue(batchCount(output) == 1)
+ assertTrue(recordCount(output) == 1)
+ assertTrue(output.contains("Log starting offset: 0"))
+ assertTrue(output.contains("Could not deserialize metadata record"))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataIoException(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val metadataRecords: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
metadataRecords:_*), leaderEpoch = 0)
+ log.flush(false)
+
+ Files.setPosixFilePermissions(Paths.get(logFilePath),
PosixFilePermissions.fromString("-w-------"))
+
+ assertThrows(classOf[AccessDeniedException],
+ () => runDumpLogSegments(Array("--remote-log-metadata-decoder",
"--files", logFilePath)))
+ }
+
+ @Test
+ def testDumpRemoteLogMetadataNoFilesFlag(): Unit = {
+ Exit.setExitProcedure((_, message) => throw new
IllegalArgumentException(message.orNull))
+ val thrown = assertThrows(classOf[IllegalArgumentException], () =>
runDumpLogSegments(Array("--remote-log-metadata-decoder")))
+ Exit.resetExitProcedure()
+ assertTrue(thrown.getMessage.equals("Missing required argument
\"[files]\""))
Review Comment:
This is a little dangerous: if the `assertThrows` assertion fails, `resetExitProcedure` will never be called, leaving the custom exit procedure in place for subsequent tests. Please wrap this in a try/finally so the exit procedure is always reset. Thanks.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]