chia7712 commented on code in PR #19371: URL: https://github.com/apache/kafka/pull/19371#discussion_r2045151038
########## core/src/main/scala/kafka/raft/KafkaMetadataLog.scala: ########## @@ -583,35 +584,72 @@ object KafkaMetadataLog extends Logging { scheduler: Scheduler, config: MetadataLogConfig ): KafkaMetadataLog = { - val props = new Properties() - props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, config.maxBatchSizeInBytes.toString) - props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG, config.logSegmentBytes.toString) - props.setProperty(TopicConfig.SEGMENT_MS_CONFIG, config.logSegmentMillis.toString) - props.setProperty(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT.toString) - - // Disable time and byte retention when deleting segments - props.setProperty(TopicConfig.RETENTION_MS_CONFIG, "-1") - props.setProperty(TopicConfig.RETENTION_BYTES_CONFIG, "-1") + val props: Properties = settingLogProperties(config) LogConfig.validate(props) val defaultLogConfig = new LogConfig(props) - if (config.logSegmentBytes < config.logSegmentMinBytes) { - throw new InvalidConfigurationException( - s"Cannot set ${KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG} below ${config.logSegmentMinBytes}: ${config.logSegmentBytes}" - ) - } else if (defaultLogConfig.retentionMs >= 0) { - throw new InvalidConfigurationException( - s"Cannot set ${TopicConfig.RETENTION_MS_CONFIG} above -1: ${defaultLogConfig.retentionMs}." - ) - } else if (defaultLogConfig.retentionSize >= 0) { - throw new InvalidConfigurationException( - s"Cannot set ${TopicConfig.RETENTION_BYTES_CONFIG} above -1: ${defaultLogConfig.retentionSize}." - ) + validateConfig(config, defaultLogConfig) + + val metadataLog: KafkaMetadataLog = createKafkaMetadataLog(topicPartition, topicId, dataDir, time, scheduler, config, defaultLogConfig) + + printWarningMessage(config, metadataLog) + + // When recovering, truncate fully if the latest snapshot is after the log end offset. 
This can happen to a follower + // when the follower crashes after downloading a snapshot from the leader but before it could truncate the log fully. + metadataLog.truncateToLatestSnapshot() + + metadataLog + } + + private def printWarningMessage(config: MetadataLogConfig, metadataLog: KafkaMetadataLog): Unit = { + // Print a warning if users have overridden the internal config + if (config.logSegmentMinBytes != KafkaRaftClient.MAX_BATCH_SIZE_BYTES) { + metadataLog.error(s"Overriding ${KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG} is only supported for testing. Setting " + + s"this value too low may lead to an inability to write batches of metadata records.") } + } + + // visible for testing + def internalApply( Review Comment: +1 to move the internal config to `MetadataLogConfig`, and it would be better to wait for #19465, which extracts the metadata-related configs from other classes into `MetadataLogConfig` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org