This is an automated email from the ASF dual-hosted git repository.
payang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new d0bf2423ee0 MINOR: fix misspelling (#21534)
d0bf2423ee0 is described below
commit d0bf2423ee0f5fa65f03fae0220f590e2dcb1f5f
Author: Hy (하이) <[email protected]>
AuthorDate: Sun Feb 22 22:05:47 2026 +0900
MINOR: fix misspelling (#21534)
Correcting typos in comments
Reviewers: PoAn Yang [[email protected]](mailto:[email protected])
---------
Co-authored-by: high.lee <[email protected]>
Co-authored-by: hy-rice <[email protected]>
---
.../main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java | 2 +-
docs/streams/upgrade-guide.md | 2 +-
.../org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java | 2 +-
.../apache/kafka/common/test/junit/RaftClusterInvocationContext.java | 4 ++--
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git
a/clients/src/main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java
b/clients/src/main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java
index b2e93abaae7..803ec9eeeef 100644
---
a/clients/src/main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java
+++
b/clients/src/main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java
@@ -138,7 +138,7 @@ public class KafkaFutureImpl<T> extends KafkaFuture<T> {
* (which KafkaFuture does not).
*
* The semantics for KafkaFuture are that all exceptional completions of
the future (via #completeExceptionally()
- * or exceptions from dependants) manifest as ExecutionException, as
observed via both get() and getNow().
+ * or exceptions from dependents) manifest as ExecutionException, as
observed via both get() and getNow().
*/
private void maybeThrowCancellationException(Throwable cause) {
if (cause instanceof CancellationException) {
diff --git a/docs/streams/upgrade-guide.md b/docs/streams/upgrade-guide.md
index 46e579d72a5..be16ebe0235 100644
--- a/docs/streams/upgrade-guide.md
+++ b/docs/streams/upgrade-guide.md
@@ -319,7 +319,7 @@ For multi-AZ deployments, it is desired to assign
StandbyTasks to a KafkaStreams
[Interactive
Queries](/documentation/streams/developer-guide/interactive-queries.html) allow
users to tap into the operational state of Kafka Streams processor nodes. The
existing API is tightly coupled with the actual state store interfaces and thus
the internal implementation of state store. To break up this tight coupling and
allow for building more advanced IQ features,
[KIP-796](https://cwiki.apache.org/confluence/x/34xnCw) introduces a completely
new IQv2 API, via `StateQueryReques [...]
-The Kafka Streams DSL may insert so-called repartition topics for certain DSL
operators to ensure correct partitioning of data. These topics are configured
with infinite retention time, and Kafka Streams purges old data explicitly via
"delete record" requests, when commiting input topic offsets.
[KIP-811](https://cwiki.apache.org/confluence/x/JY-kCw) adds a new config
`repartition.purge.interval.ms` allowing you to configure the purge interval
independently of the commit interval.
+The Kafka Streams DSL may insert so-called repartition topics for certain DSL
operators to ensure correct partitioning of data. These topics are configured
with infinite retention time, and Kafka Streams purges old data explicitly via
"delete record" requests, when committing input topic offsets.
[KIP-811](https://cwiki.apache.org/confluence/x/JY-kCw) adds a new config
`repartition.purge.interval.ms` allowing you to configure the purge interval
independently of the commit interval.
## Streams API changes in 3.1.0
diff --git
a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
index cbfc15de4d7..9c48b15103f 100644
---
a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
+++
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
@@ -3022,7 +3022,7 @@ public class OffsetMetadataManagerTest {
new OffsetExpirationConditionImpl(offsetAndMetadata ->
offsetAndMetadata.commitTimestampMs)));
when(group.isSubscribedToTopic("foo")).thenReturn(false);
- // foo-0 is expired, but the group is not deleted beacuse it has
pending transactional offset commits.
+ // foo-0 is expired, but the group is not deleted because it has
pending transactional offset commits.
List<CoordinatorRecord> expectedRecords = List.of(
GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group-id", "foo",
0)
);
diff --git
a/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/RaftClusterInvocationContext.java
b/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/RaftClusterInvocationContext.java
index 23eed911b42..704d4a7a563 100644
---
a/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/RaftClusterInvocationContext.java
+++
b/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/RaftClusterInvocationContext.java
@@ -129,7 +129,7 @@ public class RaftClusterInvocationContext implements
TestTemplateInvocationConte
private final ClusterConfig clusterConfig;
final AtomicBoolean started = new AtomicBoolean(false);
final AtomicBoolean stopped = new AtomicBoolean(false);
- final AtomicBoolean formated = new AtomicBoolean(false);
+ final AtomicBoolean formatted = new AtomicBoolean(false);
private KafkaClusterTestKit clusterTestKit;
private final boolean isCombined;
private final ListenerName listenerName;
@@ -269,7 +269,7 @@ public class RaftClusterInvocationContext implements
TestTemplateInvocationConte
}
public void format() throws Exception {
- if (formated.compareAndSet(false, true)) {
+ if (formatted.compareAndSet(false, true)) {
Map<String, Feature> nameToSupportedFeature = new TreeMap<>();
Feature.PRODUCTION_FEATURES.forEach(feature ->
nameToSupportedFeature.put(feature.featureName(), feature));
Map<String, Short> newFeatureLevels = new TreeMap<>();