junrao commented on code in PR #19371:
URL: https://github.com/apache/kafka/pull/19371#discussion_r2045280340
##########
core/src/main/scala/kafka/raft/KafkaMetadataLog.scala:
##########
@@ -583,35 +584,72 @@ object KafkaMetadataLog extends Logging {
     scheduler: Scheduler,
     config: MetadataLogConfig
   ): KafkaMetadataLog = {
-    val props = new Properties()
-    props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, config.maxBatchSizeInBytes.toString)
-    props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG, config.logSegmentBytes.toString)
-    props.setProperty(TopicConfig.SEGMENT_MS_CONFIG, config.logSegmentMillis.toString)
-    props.setProperty(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT.toString)
-
-    // Disable time and byte retention when deleting segments
-    props.setProperty(TopicConfig.RETENTION_MS_CONFIG, "-1")
-    props.setProperty(TopicConfig.RETENTION_BYTES_CONFIG, "-1")
+    val props: Properties = settingLogProperties(config)
     LogConfig.validate(props)
     val defaultLogConfig = new LogConfig(props)
 
-    if (config.logSegmentBytes < config.logSegmentMinBytes) {
-      throw new InvalidConfigurationException(
-        s"Cannot set ${KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG} below ${config.logSegmentMinBytes}: ${config.logSegmentBytes}"
-      )
-    } else if (defaultLogConfig.retentionMs >= 0) {
-      throw new InvalidConfigurationException(
-        s"Cannot set ${TopicConfig.RETENTION_MS_CONFIG} above -1: ${defaultLogConfig.retentionMs}."
-      )
-    } else if (defaultLogConfig.retentionSize >= 0) {
-      throw new InvalidConfigurationException(
-        s"Cannot set ${TopicConfig.RETENTION_BYTES_CONFIG} above -1: ${defaultLogConfig.retentionSize}."
-      )
+    validateConfig(config, defaultLogConfig)
+
+    val metadataLog: KafkaMetadataLog = createKafkaMetadataLog(topicPartition, topicId, dataDir, time, scheduler, config, defaultLogConfig)
+
+    printWarningMessage(config, metadataLog)
+
+    // When recovering, truncate fully if the latest snapshot is after the log end offset. This can happen to a follower
+    // when the follower crashes after downloading a snapshot from the leader but before it could truncate the log fully.
+    metadataLog.truncateToLatestSnapshot()
+
+    metadataLog
+  }
+
+  private def printWarningMessage(config: MetadataLogConfig, metadataLog: KafkaMetadataLog): Unit = {
+    // Print a warning if users have overridden the internal config
+    if (config.logSegmentMinBytes != KafkaRaftClient.MAX_BATCH_SIZE_BYTES) {
+      metadataLog.error(s"Overriding ${KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG} is only supported for testing. Setting " +
+        s"this value too low may lead to an inability to write batches of metadata records.")
     }
+  }
+
+  // visible for testing
+  def internalApply(

Review Comment:
   I just realized that the metadata log uses a different approach to allow tests to use smaller segment bytes than allowed in production. That approach defines the public segment bytes config with a small minimum requirement, but adds METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG to enforce the actual minimum requirement in production. Tests can then lower this internal config to allow smaller minimum bytes. The benefit of this approach is that the existing config can be used directly to set a smaller value for tests. The downside is that the documented minimum value is inaccurate and the validation is done through custom logic. It would be useful to pick the same strategy for the metadata log and the regular log. The metadata log approach seems slightly better since it's less intrusive. We could fix the inaccurate minimum value description for production somehow.
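   To make the pattern concrete, here is a minimal, hypothetical sketch of the internal-min-config approach described above. All names (`SegmentConfigSketch`, `LogConfigSketch`, the simplified config keys) are illustrative, not the actual Kafka code; the real logic lives in `KafkaMetadataLog` and `MetadataLogConfig`:

   ```scala
   // Sketch only: the public config's documented minimum stays small, while an
   // internal config carries the real production floor and can be lowered in tests.
   object SegmentConfigSketch {
     // Public config: tests can set tiny segments through the ordinary config path.
     val SegmentBytesConfig = "metadata.log.segment.bytes"
     // Internal config: enforces the actual production minimum; testing-only override.
     val SegmentMinBytesConfig = "metadata.log.segment.min.bytes"
     // Stand-in for the production floor (KafkaRaftClient.MAX_BATCH_SIZE_BYTES, 8 MiB).
     val ProductionMinBytes: Int = 8 * 1024 * 1024

     final case class LogConfigSketch(segmentBytes: Int, segmentMinBytes: Int = ProductionMinBytes)

     // Validation compares the public value against the overridable internal minimum
     // instead of a minimum baked into the config definition itself.
     def validate(config: LogConfigSketch): Unit = {
       if (config.segmentMinBytes != ProductionMinBytes)
         println(s"Warning: overriding $SegmentMinBytesConfig is only supported for testing.")
       if (config.segmentBytes < config.segmentMinBytes)
         throw new IllegalArgumentException(
           s"Cannot set $SegmentBytesConfig below ${config.segmentMinBytes}: ${config.segmentBytes}")
     }

     def main(args: Array[String]): Unit = {
       validate(LogConfigSketch(segmentBytes = ProductionMinBytes))          // production: passes silently
       validate(LogConfigSketch(segmentBytes = 1024, segmentMinBytes = 512)) // test override: warns, then passes
     }
   }
   ```

   The key point is that the documented minimum on the public config stays small, so tests set it directly, while the internal config carries the real production floor and triggers a warning when overridden.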