This is an automated email from the ASF dual-hosted git repository.

schofielaj pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 3d7ac0c3d12 MINOR: Fix typos in multiple files (#19102)
3d7ac0c3d12 is described below

commit 3d7ac0c3d12d765abf658c89fd23ab10e6f4e87b
Author: co63oc <[email protected]>
AuthorDate: Wed Mar 5 22:27:32 2025 +0800

    MINOR: Fix typos in multiple files (#19102)
    
    Fix typos in multiple files
    
    Reviewers: Andrew Schofield <[email protected]>
---
 .../connect/transforms/field/FieldPathNotationTest.java      |  6 +++---
 core/src/main/java/kafka/server/share/SharePartition.java    |  2 +-
 .../DelegationTokenEndToEndAuthorizationWithOwnerTest.scala  |  2 +-
 docs/streams/upgrade-guide.html                              |  2 +-
 docs/upgrade.html                                            |  2 +-
 .../group/streams/topics/ConfiguredInternalTopic.java        |  2 +-
 .../apache/kafka/controller/ReplicationControlManager.java   |  2 +-
 .../kafka/metadata/properties/MetaPropertiesEnsemble.java    |  2 +-
 .../apache/kafka/controller/PartitionChangeBuilderTest.java  |  2 +-
 .../main/java/org/apache/kafka/server/common/Feature.java    |  2 +-
 .../kafka/streams/integration/LagFetchIntegrationTest.java   |  2 +-
 .../main/java/org/apache/kafka/streams/kstream/Joined.java   |  2 +-
 .../streams/kstream/SessionWindowedCogroupedKStream.java     |  2 +-
 .../apache/kafka/streams/kstream/SessionWindowedKStream.java | 12 ++++++------
 .../kafka/streams/processor/internals/assignment/Graph.java  |  2 +-
 .../kafka/streams/processor/internals/TaskManagerTest.java   |  4 ++--
 16 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java
index 1434778a853..4ccf1303654 100644
--- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java
+++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java
@@ -81,7 +81,7 @@ class FieldPathNotationTest {
     @Test
     void shouldBuildV2WhenIncludesDotsAndBacktickPair() {
         // Given v2 and fields including dots
-        // When backticks are wrapping a field name (i.e. withing edges or between dots)
+        // When backticks are wrapping a field name (i.e. within edges or between dots)
         // Then build a path with steps separated by dots and not including backticks
         assertParseV2("`foo.bar.baz`", "foo.bar.baz");
         assertParseV2("foo.`bar.baz`", "foo", "bar.baz");
@@ -92,7 +92,7 @@ class FieldPathNotationTest {
     @Test
     void shouldBuildV2AndIgnoreBackticksThatAreNotWrapping() {
         // Given v2 and fields including dots and backticks
-        // When backticks are wrapping a field name (i.e. withing edges or between dots)
+        // When backticks are wrapping a field name (i.e. within edges or between dots)
         // Then build a path with steps separated by dots and including non-wrapping backticks
         assertParseV2("foo.``bar.baz`", "foo", "`bar.baz");
         assertParseV2("foo.`bar.baz``", "foo", "bar.baz`");
@@ -105,7 +105,7 @@ class FieldPathNotationTest {
     @Test
     void shouldBuildV2AndEscapeBackticks() {
         // Given v2 and fields including dots and backticks
-        // When backticks are wrapping a field name (i.e. withing edges or between dots)
+        // When backticks are wrapping a field name (i.e. within edges or between dots)
         // and wrapping backticks that are part of the field name are escaped with backslashes
         // Then build a path with steps separated by dots and including escaped and non-wrapping backticks
         assertParseV2("foo.`bar\\`.baz`", "foo", "bar`.baz");
diff --git a/core/src/main/java/kafka/server/share/SharePartition.java b/core/src/main/java/kafka/server/share/SharePartition.java
index 301d2678782..56e667a4317 100644
--- a/core/src/main/java/kafka/server/share/SharePartition.java
+++ b/core/src/main/java/kafka/server/share/SharePartition.java
@@ -300,7 +300,7 @@ public class SharePartition {
     private long endOffset;
 
     /**
-     * The initial read gap offset tracks if there are any gaps in the in-flight batch during intial
+     * The initial read gap offset tracks if there are any gaps in the in-flight batch during initial
      * read of the share partition state from the persister.
      */
     private InitialReadGapOffset initialReadGapOffset;
diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala
index 833b06654d3..4f7a492a3f5 100644
--- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala
+++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala
@@ -65,7 +65,7 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE
   private val describeTokenFailPassword = "describe-token-fail-password"
 
   override def configureSecurityAfterServersStart(): Unit = {
-    // Create the Acls before calling super which will create the additiona tokens
+    // Create the Acls before calling super which will create the additional tokens
     Using.resource(createPrivilegedAdminClient()) { superuserAdminClient =>
       superuserAdminClient.createAcls(List(AclTokenOtherDescribe, AclTokenCreate, AclTokenDescribe).asJava).values
 
diff --git a/docs/streams/upgrade-guide.html b/docs/streams/upgrade-guide.html
index 257187bfa45..b1d16397e10 100644
--- a/docs/streams/upgrade-guide.html
+++ b/docs/streams/upgrade-guide.html
@@ -898,7 +898,7 @@
         As of 2.6.0 Kafka Streams deprecates <code>KStream.through()</code> in favor of the new <code>KStream.repartition()</code> operator
         (as per <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-221%3A+Enhance+DSL+with+Connecting+Topic+Creation+and+Repartition+Hint">KIP-221</a>).
         <code>KStream.repartition()</code> is similar to <code>KStream.through()</code>, however Kafka Streams will manage the topic for you.
-        If you need to write into and read back from a topic that you mange, you can fall back to use <code>KStream.to()</code> in combination with <code>StreamsBuilder#stream()</code>.
+        If you need to write into and read back from a topic that you manage, you can fall back to use <code>KStream.to()</code> in combination with <code>StreamsBuilder#stream()</code>.
         Please refer to the <a href="/{{version}}/documentation/streams/developer-guide/dsl-api.html">developer guide</a> for more details about <code>KStream.repartition()</code>.
     </p>
 
diff --git a/docs/upgrade.html b/docs/upgrade.html
index 0c3c3330ca7..104e8c02849 100644
--- a/docs/upgrade.html
+++ b/docs/upgrade.html
@@ -386,7 +386,7 @@
                 </li>
                 <li>
                     KIP-714 is now enabled for Kafka Streams via <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-1076%3A++Metrics+for+client+applications+KIP-714+extension">KIP-1076</a>.
-                   This allows to not only collect the metric of the internally used clients of a Kafka Streams appliction via a broker-side plugin,
+                   This allows to not only collect the metric of the internally used clients of a Kafka Streams application via a broker-side plugin,
                    but also to collect the <a href="/{{version}}/documentation/#kafka_streams_monitoring">metrics</a> of the Kafka Streams runtime itself.
             </li>
                 <li>
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java
index deba335bec4..fa19804eb0f 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java
@@ -32,7 +32,7 @@ import java.util.Optional;
  *
  * @param name               The name of the topic.
  * @param numberOfPartitions The number of partitions for the topic.
- * @param replicationFactor  The replication factor of the topic. If undefiend, the broker default is used.
+ * @param replicationFactor  The replication factor of the topic. If undefined, the broker default is used.
  * @param topicConfigs       The topic configurations of the topic.
  */
 public record ConfiguredInternalTopic(String name,
diff --git a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
index d4c613003c3..12bbadb17a7 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
@@ -1772,7 +1772,7 @@ public class ReplicationControlManager {
     }
 
     /**
-     * Trigger unclean leader election for partitions without leader (visiable for testing)
+     * Trigger unclean leader election for partitions without leader (visible for testing)
      *
      * @param records       The record list to append to.
      * @param maxElections  The maximum number of elections to perform.
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsemble.java b/metadata/src/main/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsemble.java
index 8527ed7c329..600b2d86cc6 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsemble.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsemble.java
@@ -517,7 +517,7 @@ public final class MetaPropertiesEnsemble {
             }
             if (metaProps.directoryId().isPresent()) {
                 if (DirectoryId.reserved(metaProps.directoryId().get())) {
-                    throw new RuntimeException("Invalid resrved directory ID " +
+                    throw new RuntimeException("Invalid reserved directory ID " +
                         metaProps.directoryId().get() + " found in " + logDir);
                 }
                 String prevLogDir = seenUuids.put(metaProps.directoryId().get(), logDir);
diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java
index bea3acc1c00..2cca336bd69 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java
@@ -580,7 +580,7 @@ public class PartitionChangeBuilderTest {
             .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value());
 
         if (version >= 2) {
-            // The test partition has ELR, so unclean election will clear these fiedls.
+            // The test partition has ELR, so unclean election will clear these fields.
             record.setEligibleLeaderReplicas(Collections.emptyList())
                 .setLastKnownElr(Collections.emptyList());
         }
diff --git a/server-common/src/main/java/org/apache/kafka/server/common/Feature.java b/server-common/src/main/java/org/apache/kafka/server/common/Feature.java
index af71bc63080..3ac2923126d 100644
--- a/server-common/src/main/java/org/apache/kafka/server/common/Feature.java
+++ b/server-common/src/main/java/org/apache/kafka/server/common/Feature.java
@@ -250,7 +250,7 @@ public enum Feature {
      *     - The feature X has default version = XV_10 (dependency = {}), latest production = XV_10 (dependency = {})
      *     - The feature X has default version = XV_10 (dependency = {Y: YV_3}), latest production = XV_11 (dependency = {Y: YV_4})
      *       The feature Y has default version = YV_3 (dependency = {}), latest production = YV_4 (dependency = {})
-     *     - The feature X has default version = XV_10 (dependency = {MetadataVersion: IBP_4_0_IV0}), boostrap MV = IBP_4_0_IV0,
+     *     - The feature X has default version = XV_10 (dependency = {MetadataVersion: IBP_4_0_IV0}), bootstrap MV = IBP_4_0_IV0,
      *                       latest production = XV_11 (dependency = {MetadataVersion: IBP_4_0_IV1}), MV latest production = IBP_4_0_IV1
      *
      * @param feature the feature to validate.
diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/LagFetchIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/LagFetchIntegrationTest.java
index 0441b7d49e9..db2a809c718 100644
--- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/LagFetchIntegrationTest.java
+++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/LagFetchIntegrationTest.java
@@ -228,7 +228,7 @@ public class LagFetchIntegrationTest {
             assertThat(lagInfo.currentOffsetPosition(), equalTo(0L));
             assertThat(lagInfo.endOffsetPosition(), equalTo(5L));
             assertThat(lagInfo.offsetLag(), equalTo(5L));
-            // standby thread wont proceed to RUNNING before this barrier is crossed
+            // standby thread won't proceed to RUNNING before this barrier is crossed
             lagCheckBarrier.await(60, TimeUnit.SECONDS);
 
             // wait till the lag goes down to 0, on the standby
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java b/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java
index f1df05fe2e5..0ae26ec3ccd 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java
@@ -169,7 +169,7 @@ public class Joined<K, VLeft, VRight> implements NamedOperation<Joined<K, VLeft,
 
 
     /**
-     * Create an instance of {@code Joined} with aother value {@link Serde}.
+     * Create an instance of {@code Joined} with another value {@link Serde}.
      * {@code null} values are accepted and will be replaced by the default value serde as defined in config.
      *
      * @param rightValueSerde
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.java
index 498fbf65dbb..b273132b111 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedCogroupedKStream.java
@@ -210,7 +210,7 @@ public interface SessionWindowedCogroupedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters)}  KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // some windowed aggregation on value type double
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> localWindowStore = streams.store(storeQueryParams);
      * String key = "some-key";
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java
index f18c7c2af54..40e1a6eb724 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/SessionWindowedKStream.java
@@ -131,7 +131,7 @@ public interface SessionWindowedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // compute sum
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
      * String key = "some-key";
@@ -178,7 +178,7 @@ public interface SessionWindowedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // compute sum
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
      * String key = "some-key";
@@ -332,7 +332,7 @@ public interface SessionWindowedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // compute sum
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
      * String key = "some-key";
@@ -393,7 +393,7 @@ public interface SessionWindowedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters)}  KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // compute sum
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
      * String key = "some-key";
@@ -552,7 +552,7 @@ public interface SessionWindowedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // some windowed aggregation on value type double
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
      * String key = "some-key";
@@ -614,7 +614,7 @@ public interface SessionWindowedKStream<K, V> {
      * {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
      * <pre>{@code
      * KafkaStreams streams = ... // some windowed aggregation on value type double
-     * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+     * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
      * StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
      * ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
      * String key = "some-key";
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/Graph.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/Graph.java
index cf585676851..7f066bdccb7 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/Graph.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/Graph.java
@@ -214,7 +214,7 @@ public class Graph<V extends Comparable<V>> {
         }
 
         // Add a dummy null node connected to every existing node with residual flow 1 and cost 0
-        // Then try to find negative cylce starting using dummy node as source node. Since there's no
+        // Then try to find negative cycle starting using dummy node as source node. Since there's no
         // path from original nodes to null node, negative cycles must be within original nodes.
         final TreeMap<V, Edge> destMap = new TreeMap<>();
         for (final V node : residualGraph.nodes) {
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
index 6d812e0119e..79b45dceeba 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
@@ -4673,7 +4673,7 @@ public class TaskManagerTest {
         verifyNoMoreInteractions(activeTaskCreator);
         verifyNoMoreInteractions(standbyTaskCreator);
 
-        // verify the recycled task is now being used as an assiged Active
+        // verify the recycled task is now being used as an assigned Active
         assertEquals(Collections.singletonMap(taskId00, activeTask), taskManager.activeTaskMap());
         assertEquals(Collections.emptyMap(), taskManager.standbyTaskMap());
     }
@@ -4734,7 +4734,7 @@ public class TaskManagerTest {
         verifyNoMoreInteractions(activeTaskCreator);
         verifyNoMoreInteractions(standbyTaskCreator);
 
-        // verify the recycled task is now being used as an assiged Active
+        // verify the recycled task is now being used as an assigned Active
         assertEquals(Collections.singletonMap(taskId00, activeTask), taskManager.activeTaskMap());
         assertEquals(Collections.emptyMap(), taskManager.standbyTaskMap());
     }
