This is an automated email from the ASF dual-hosted git repository.
mimaison pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new 5631be20a6b MINOR: Remove ZooKeeper mentions in comments (#18646)
5631be20a6b is described below
commit 5631be20a6b10822586ac66a0440d1e4d4466b68
Author: Ken Huang <[email protected]>
AuthorDate: Tue Jan 28 19:35:46 2025 +0800
MINOR: Remove ZooKeeper mentions in comments (#18646)
Reviewers: Mickael Maison <[email protected]>
---
core/src/main/scala/kafka/server/ConfigAdminManager.scala | 13 ++++---------
core/src/main/scala/kafka/server/DynamicBrokerConfig.scala | 10 +++++-----
.../test/java/kafka/admin/ConfigCommandIntegrationTest.java | 3 ---
3 files changed, 9 insertions(+), 17 deletions(-)
diff --git a/core/src/main/scala/kafka/server/ConfigAdminManager.scala b/core/src/main/scala/kafka/server/ConfigAdminManager.scala
index 319b45020d4..b0b84fc8c60 100644
--- a/core/src/main/scala/kafka/server/ConfigAdminManager.scala
+++ b/core/src/main/scala/kafka/server/ConfigAdminManager.scala
@@ -58,9 +58,8 @@ import scala.jdk.CollectionConverters._
 * KIP-412 added support for changing log4j log levels via IncrementalAlterConfigs, but
 * not via the original AlterConfigs. In retrospect, this would have been better off as a
 * separate RPC, since the semantics are quite different. In particular, KIP-226 configs
- * are stored durably (in ZK or KRaft) and persist across broker restarts, but KIP-412
- * log4j levels do not. However, we have to handle it here now in order to maintain
- * compatibility.
+ * are stored durably and persist across broker restarts, but KIP-412 log4j levels do not.
+ * However, we have to handle it here now in order to maintain compatibility.
 *
 * Configuration processing is split into two parts.
 * - The first step, called "preprocessing," handles setting KIP-412 log levels, validating
@@ -69,14 +68,10 @@ import scala.jdk.CollectionConverters._
 * - The second step is "persistence," and handles storing the configurations durably to our
 *   metadata store.
 *
- * When KIP-590 forwarding is active (such as in KRaft mode), preprocessing will happen
- * on the broker, while persistence will happen on the active controller. (If KIP-590
- * forwarding is not active, then both steps are done on the same broker.)
- *
- * In KRaft mode, the active controller performs its own configuration validation step in
+ * The active controller performs its own configuration validation step in
 * [[kafka.server.ControllerConfigurationValidator]]. This is mainly important for
 * TOPIC resources, since we already validated changes to BROKER resources on the
- * forwarding broker. The KRaft controller is also responsible for enforcing the configured
+ * forwarding broker. The controller is also responsible for enforcing the configured
 * [[org.apache.kafka.server.policy.AlterConfigPolicy]].
 */
class ConfigAdminManager(nodeId: Int,
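
As background for the two-step split described in the updated comment, a minimal Scala sketch of the flow may help; all names below are hypothetical and none of them exist in the Kafka codebase:

    // Hypothetical outline of the two-step config update flow described
    // in the ConfigAdminManager comment above.
    final case class ConfigChange(resource: String, key: String, value: String)

    object ConfigUpdateFlowSketch {
      // Step 1, "preprocessing", runs on the broker that receives the request:
      // KIP-412 log4j levels are applied immediately and are not persisted.
      def preprocess(changes: List[ConfigChange]): List[ConfigChange] = {
        val (logLevels, durable) = changes.partition(_.key.startsWith("logger."))
        logLevels.foreach(c => println(s"applied ${c.key}=${c.value} (not persisted)"))
        durable
      }

      // Step 2, "persistence", runs on the active controller, which stores the
      // remaining (KIP-226 style) changes durably in the metadata store.
      def persist(changes: List[ConfigChange]): Unit =
        changes.foreach(c => println(s"persisting ${c.resource}: ${c.key}=${c.value}"))

      def main(args: Array[String]): Unit =
        persist(preprocess(List(
          ConfigChange("BROKER 1", "logger.kafka.server.KafkaApis", "DEBUG"),
          ConfigChange("BROKER 1", "log.cleaner.threads", "2"))))
    }

The AlterConfigPolicy mentioned at the end of the comment is the pluggable interface enabled via alter.config.policy.class.name; a policy could look roughly like the following (the min.insync.replicas rule is purely illustrative):

    import java.util
    import org.apache.kafka.common.config.ConfigResource
    import org.apache.kafka.common.errors.PolicyViolationException
    import org.apache.kafka.server.policy.AlterConfigPolicy

    // Illustrative policy: reject setting min.insync.replicas below 2 on topics.
    class MinIsrPolicy extends AlterConfigPolicy {
      override def configure(configs: util.Map[String, _]): Unit = ()

      override def validate(requestMetadata: AlterConfigPolicy.RequestMetadata): Unit =
        if (requestMetadata.resource.`type` == ConfigResource.Type.TOPIC) {
          Option(requestMetadata.configs.get("min.insync.replicas"))
            .filter(_.toInt < 2)
            .foreach(v => throw new PolicyViolationException(
              s"min.insync.replicas must be at least 2, got $v"))
        }

      override def close(): Unit = ()
    }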
diff --git a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala
index 32febffb546..d210ca06f93 100755
--- a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala
+++ b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala
@@ -336,12 +336,12 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging
 }

 /**
- * All config updates through ZooKeeper are triggered through actual changes in values stored in ZooKeeper.
+ * Config updates are triggered through actual changes in stored values.
 * For some configs like SSL keystores and truststores, we also want to reload the store if it was modified
- * in-place, even though the actual value of the file path and password haven't changed. This scenario alone
- * is handled here when a config update request using admin client is processed by ZkAdminManager. If any of
- * the SSL configs have changed, then the update will not be done here, but will be handled later when ZK
- * changes are processed. At the moment, only listener configs are considered for reloading.
+ * in-place, even though the actual value of the file path and password haven't changed. This scenario is
+ * handled when a config update request using admin client is processed by the AdminManager. If any of
+ * the SSL configs have changed, then the update will be handled when configuration changes are processed.
+ * At the moment, only listener configs are considered for reloading.
 */
 private[server] def reloadUpdatedFilesWithoutConfigChange(newProps: Properties): Unit = CoreUtils.inWriteLock(lock) {
   reconfigurables.forEach(r => {
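
The "reload without a config change" case described in the comment above amounts to detecting that a keystore or truststore file was rewritten on disk even though its configured path and password are unchanged. A rough, hypothetical illustration of that check (not Kafka's actual implementation):

    import java.nio.file.{Files, Path, Paths}

    // Hypothetical check: the configured keystore path and password are the
    // same, but the file may have been replaced in-place on disk, in which
    // case the store still needs to be reloaded.
    object KeystoreReloadSketch {
      def modifiedSinceLoad(keystorePath: Path, lastLoadedMs: Long): Boolean =
        Files.exists(keystorePath) &&
          Files.getLastModifiedTime(keystorePath).toMillis > lastLoadedMs

      def main(args: Array[String]): Unit =
        println(modifiedSinceLoad(Paths.get("/etc/kafka/ssl/keystore.jks"), 0L))
    }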
diff --git a/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java b/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java
index 31d0de824f1..9aa7c9da2fa 100644
--- a/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java
+++ b/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java
@@ -173,11 +173,9 @@ public class ConfigCommandIntegrationTest {
        alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
            singletonMap("listener.name.external.ssl.keystore.password", "secret"),
            alterOpts);
-       // Password config update with encoder secret should succeed and encoded password must be stored in ZK
        Map<String, String> configs = new HashMap<>();
        configs.put("listener.name.external.ssl.keystore.password", "secret");
        configs.put("log.cleaner.threads", "2");
-       // Password encoder configs

        // Password config update at default cluster-level should fail
        assertThrows(ExecutionException.class,
@@ -382,7 +380,6 @@ public class ConfigCommandIntegrationTest {
    @ClusterTest(
        // Must be at greater than 1MB per cleaner thread, set to 2M+2 so that we can set 2 cleaner threads.
        serverProperties = {@ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "2097154")},
-       // Zk code has been removed, use kraft and mockito to mock this situation
        metadataVersion = MetadataVersion.IBP_3_3_IV0
    )
    public void testUnsupportedVersionException() {
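
For reference, the sizing arithmetic in the @ClusterTest comment above checks out: the dedupe buffer must be greater than 1MB (1048576 bytes) per cleaner thread, so two threads need more than 2097152 bytes, and the configured 2097154 ("2M+2") clears that bound. A quick Scala sanity check:

    // Sanity check for the log.cleaner.dedupe.buffer.size arithmetic above.
    object DedupeBufferSizingSketch {
      val BytesPerMB: Long = 1024L * 1024L

      def allowsThreads(bufferSizeBytes: Long, threads: Int): Boolean =
        bufferSizeBytes > threads * BytesPerMB

      def main(args: Array[String]): Unit = {
        println(allowsThreads(2097154L, 2)) // true: 2097154 > 2 * 1048576
        println(allowsThreads(2097152L, 2)) // false: exactly at the bound
      }
    }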