This is an automated email from the ASF dual-hosted git repository.
divijv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new e71f68d6c91 MINOR: fix typos for client (#13884)
e71f68d6c91 is described below
commit e71f68d6c91394db30bb1219ea882232f7be194d
Author: minjian.cai <[email protected]>
AuthorDate: Wed Jun 28 22:47:42 2023 +0800
MINOR: fix typos for client (#13884)
Reviewers: Luke Chen <[email protected]>, Divij Vaidya <[email protected]>, Kirk True <[email protected]>
---
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java | 2 +-
.../src/main/java/org/apache/kafka/clients/admin/ConfigEntry.java | 2 +-
.../java/org/apache/kafka/clients/admin/KafkaAdminClient.java | 4 ++--
.../java/org/apache/kafka/clients/consumer/KafkaConsumer.java | 2 +-
.../kafka/clients/consumer/NoOffsetForPartitionException.java | 2 +-
.../kafka/clients/consumer/internals/AbstractStickyAssignor.java | 4 ++--
.../kafka/clients/consumer/internals/NetworkClientDelegate.java | 2 +-
.../apache/kafka/clients/consumer/internals/OffsetFetcher.java | 2 +-
.../kafka/clients/consumer/internals/events/ApplicationEvent.java | 2 +-
.../org/apache/kafka/common/requests/FetchSnapshotResponse.java | 2 +-
.../org/apache/kafka/common/security/auth/SaslExtensions.java | 2 +-
.../internals/unsecured/OAuthBearerValidationUtils.java | 2 +-
.../src/main/java/org/apache/kafka/common/utils/ConfigUtils.java | 2 +-
.../src/main/resources/common/message/AlterPartitionResponse.json | 2 +-
.../resources/common/message/ConsumerGroupHeartbeatRequest.json | 4 ++--
.../src/main/resources/common/message/CreateTopicsResponse.json | 2 +-
clients/src/main/resources/common/message/FetchRequest.json | 2 +-
clients/src/main/resources/common/message/ListOffsetsRequest.json | 2 +-
.../src/main/resources/common/message/ListOffsetsResponse.json | 2 +-
clients/src/main/resources/common/message/MetadataRequest.json | 2 +-
clients/src/main/resources/common/message/ProduceRequest.json | 2 +-
.../src/main/resources/common/message/TxnOffsetCommitRequest.json | 2 +-
.../src/test/java/org/apache/kafka/clients/NetworkClientTest.java | 2 +-
.../kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java | 6 +++---
.../clients/admin/internals/PartitionLeaderStrategyTest.java | 2 +-
.../java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java | 2 +-
.../kafka/clients/consumer/internals/ConsumerCoordinatorTest.java | 2 +-
.../org/apache/kafka/clients/consumer/internals/FetcherTest.java | 4 ++--
.../java/org/apache/kafka/clients/producer/MockProducerTest.java | 2 +-
.../test/java/org/apache/kafka/common/feature/FeaturesTest.java | 2 +-
.../test/java/org/apache/kafka/common/metrics/MetricsTest.java | 8 ++++----
.../apache/kafka/common/network/SslTransportTls12Tls13Test.java | 6 +++---
.../common/security/authenticator/SaslAuthenticatorTest.java | 2 +-
.../internals/OAuthBearerClientInitialResponseTest.java | 4 ++--
.../security/oauthbearer/internals/OAuthBearerSaslServerTest.java | 4 ++--
.../common/security/plain/internals/PlainSaslServerTest.java | 4 ++--
.../common/security/scram/internals/ScramSaslServerTest.java | 4 ++--
clients/src/test/java/org/apache/kafka/test/MetricsBench.java | 4 ++--
38 files changed, 54 insertions(+), 54 deletions(-)
diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
index 1d469a66436..bd007f1839b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
+++ b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
@@ -969,7 +969,7 @@ public interface Admin extends AutoCloseable {
* Delete consumer groups from the cluster.
*
* @param options The options to use when deleting a consumer group.
- * @return The DeletConsumerGroupResult.
+ * @return The DeleteConsumerGroupsResult.
*/
    DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options);
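
For reference, a minimal sketch of the corrected API in use (the bootstrap
address and group ID below are illustrative, not part of this commit):

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;

    public class DeleteGroupExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            try (Admin admin = Admin.create(props)) {
                // deleteConsumerGroups returns the DeleteConsumerGroupsResult named in the fixed javadoc
                DeleteConsumerGroupsResult result =
                        admin.deleteConsumerGroups(Collections.singletonList("my-group"));
                result.all().get(); // fails if any group could not be deleted
            }
        }
    }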
diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ConfigEntry.java b/clients/src/main/java/org/apache/kafka/clients/admin/ConfigEntry.java
index 154fc8e65db..af33a70bb46 100644
--- a/clients/src/main/java/org/apache/kafka/clients/admin/ConfigEntry.java
+++ b/clients/src/main/java/org/apache/kafka/clients/admin/ConfigEntry.java
@@ -241,7 +241,7 @@ public class ConfigEntry {
*
     * @param name Configuration name (this may be different from the name of the associated {@link ConfigEntry}
* @param value Configuration value
- * @param source {@link ConfigSource} of this configuraton
+ * @param source {@link ConfigSource} of this configuration
*/
ConfigSynonym(String name, String value, ConfigSource source) {
this.name = name;
diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
index 441adceee2e..f2294df3a3a 100644
--- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
@@ -3980,14 +3980,14 @@ public class KafkaAdminClient extends AdminClient {
.setMechanism(u.credentialInfo().mechanism().type())
.setIterations(u.credentialInfo().iterations())
.setSalt(u.salt())
-                .setSaltedPassword(getSaltedPasword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations()));
+                .setSaltedPassword(getSaltedPassword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations()));
}
    private static AlterUserScramCredentialsRequestData.ScramCredentialDeletion getScramCredentialDeletion(UserScramCredentialDeletion d) {
        return new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(d.user()).setMechanism(d.mechanism().type());
}
-    private static byte[] getSaltedPasword(ScramMechanism publicScramMechanism, byte[] password, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeyException {
+    private static byte[] getSaltedPassword(ScramMechanism publicScramMechanism, byte[] password, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeyException {
        return new ScramFormatter(org.apache.kafka.common.security.scram.internals.ScramMechanism.forMechanismName(publicScramMechanism.mechanismName()))
.hi(password, salt, iterations);
}
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index 2785c646bbe..076952f37b8 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -424,7 +424,7 @@ import java.util.regex.Pattern;
* <p>
 * One of such cases is stream processing, where processor fetches from two topics and performs the join on these two streams.
 * When one of the topics is long lagging behind the other, the processor would like to pause fetching from the ahead topic
- * in order to get the lagging stream to catch up. Another example is bootstraping upon consumer starting up where there are
+ * in order to get the lagging stream to catch up. Another example is bootstrapping upon consumer starting up where there are
 * a lot of history data to catch up, the applications usually want to get the latest data on some of the topics before consider
* fetching other topics.
*
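
The pause/resume pattern this javadoc describes, as a short sketch (the topic
name and catch-up condition are illustrative; 'consumer' is an existing
KafkaConsumer):

    import java.util.Set;
    import java.util.stream.Collectors;
    import org.apache.kafka.common.TopicPartition;

    // Pause the ahead topic's partitions so the lagging stream can catch up.
    Set<TopicPartition> ahead = consumer.assignment().stream()
            .filter(tp -> tp.topic().equals("fast-topic"))
            .collect(Collectors.toSet());
    consumer.pause(ahead);   // subsequent poll() calls skip the ahead topic
    // ... once the lagging topic has caught up:
    consumer.resume(ahead);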
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java b/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java
index 1770d6048ea..2890f8e3332 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java
@@ -44,7 +44,7 @@ public class NoOffsetForPartitionException extends InvalidOffsetException {
}
/**
- * returns all partitions for which no offests are defined.
+ * returns all partitions for which no offsets are defined.
* @return all partitions without offsets
*/
public Set<TopicPartition> partitions() {
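
A short sketch of how callers typically handle this exception when no
auto.offset.reset policy is configured (the seek-to-beginning fallback is
illustrative; 'consumer' is an existing KafkaConsumer):

    import java.time.Duration;
    import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;

    try {
        consumer.poll(Duration.ofMillis(100));
    } catch (NoOffsetForPartitionException e) {
        // partitions() (fixed above) reports every partition lacking an offset
        consumer.seekToBeginning(e.partitions());
    }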
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
index 7c53100458e..1bde792d598 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
@@ -179,7 +179,7 @@ public abstract class AbstractStickyAssignor extends AbstractPartitionAssignor {
consumerToOwnedPartitions.get(otherConsumer).remove(tp);
allPreviousPartitionsToOwner.put(tp, consumer);
log.warn("Consumer {} in generation {} and
consumer {} in generation {} claiming the same " +
- "TopicPartition {} in different
generations. The topic partition wil be " +
+ "TopicPartition {} in different
generations. The topic partition will be " +
"assigned to the member with the
higher generation {}.",
consumer, memberGeneration,
otherConsumer, otherMemberGeneration,
@@ -188,7 +188,7 @@ public abstract class AbstractStickyAssignor extends AbstractPartitionAssignor {
} else {
                        // let the other member continue to own the topic partition
                        log.warn("Consumer {} in generation {} and consumer {} in generation {} claiming the same " +
-                            "TopicPartition {} in different generations. The topic partition wil be " +
+                            "TopicPartition {} in different generations. The topic partition will be " +
                            "assigned to the member with the higher generation {}.",
                            consumer, memberGeneration, otherConsumer, otherMemberGeneration,
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java
index b2f6da2d20c..9fab7f8ef75 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java
@@ -199,7 +199,7 @@ public class NetworkClientDelegate implements AutoCloseable {
public static class UnsentRequest {
private final AbstractRequest.Builder<?> requestBuilder;
private final FutureCompletionHandler handler;
- private Optional<Node> node; // empty if random node can be choosen
+ private Optional<Node> node; // empty if random node can be chosen
private Timer timer;
public UnsentRequest(
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
index c6036f880c9..a97a63a47e3 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
@@ -430,7 +430,7 @@ public class OffsetFetcher {
     * leaders available. Topic partitions from `timestampsToSearch` that do not have their leader
     * available are added to `partitionsToRetry`
     *
-     * @param timestampsToSearch The mapping from partitions ot the target timestamps
+     * @param timestampsToSearch The mapping from partitions to the target timestamps
     * @param partitionsToRetry  A set of topic partitions that will be extended with partitions
     *                           that need metadata update or re-connect to the leader.
*/
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java
index a55978770ad..98b2aebeb4b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java
@@ -26,7 +26,7 @@ abstract public class ApplicationEvent {
this.type = type;
}
/**
- * process the application event. Return true upon succesful execution,
+ * process the application event. Return true upon successful execution,
* false otherwise.
* @return true if the event was successfully executed; false otherwise.
*/
diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java
index d9abff66fe9..c86ba9cfc66 100644
--- a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java
+++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java
@@ -124,7 +124,7 @@ final public class FetchSnapshotResponse extends AbstractResponse {
.stream()
.filter(topic -> topic.name().equals(topicPartition.topic()))
.flatMap(topic -> topic.partitions().stream())
- .filter(parition -> parition.index() == topicPartition.partition())
+            .filter(partition -> partition.index() == topicPartition.partition())
.findAny();
}
diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java b/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java
index ca4c4df6079..662f50d0f6f 100644
--- a/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java
+++ b/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java
@@ -32,7 +32,7 @@ import javax.security.auth.Subject;
 * overrides the standard {@link #equals(Object)} and {@link #hashCode()} methods calling their
 * respective {@link Object#equals(Object)} and {@link Object#hashCode()} implementations. In so
 * doing, it provides equality <em>only</em> via reference identity and will not base equality on
- * the underlying values of its {@link #extensionsMap extentions map}.
+ * the underlying values of its {@link #extensionsMap extensions map}.
*
* <p/>
*
diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java
index 616f2f01a8e..f12a482f149 100644
--- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java
+++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java
@@ -175,7 +175,7 @@ public class OAuthBearerValidationUtils {
for (String requiredScopeElement : requiredScope) {
if (!tokenScope.contains(requiredScopeElement))
return OAuthBearerValidationResult.newFailure(String.format(
- "The provided scope (%s) was mising a required scope
(%s). All required scope elements: %s",
+ "The provided scope (%s) was missing a required scope
(%s). All required scope elements: %s",
String.valueOf(tokenScope), requiredScopeElement,
requiredScope),
requiredScope.toString(), null);
}
diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java b/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java
index 0f839ffa962..23de638ed95 100644
--- a/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java
+++ b/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java
@@ -105,7 +105,7 @@ public class ConfigUtils {
newConfigs.put(target, configs.get(target));
} else if (deprecated.size() > 1) {
log.error("The configuration keys " + aliasString + " are
deprecated and may be " +
- "removed in the future. Additionally, this
configuration is ambigous because " +
+ "removed in the future. Additionally, this
configuration is ambiguous because " +
"these configuration keys are all aliases for " +
target + ". Please update " +
"your configuration to have only " + target + "
set.");
newConfigs.put(target, configs.get(deprecated.get(0)));
diff --git a/clients/src/main/resources/common/message/AlterPartitionResponse.json b/clients/src/main/resources/common/message/AlterPartitionResponse.json
index 2c1eb3d46fb..e5a5408624c 100644
--- a/clients/src/main/resources/common/message/AlterPartitionResponse.json
+++ b/clients/src/main/resources/common/message/AlterPartitionResponse.json
@@ -22,7 +22,7 @@
  // Version 2 adds TopicId field to replace TopicName field, can return the following new errors:
// INELIGIBLE_REPLICA, NEW_LEADER_ELECTED and UNKNOWN_TOPIC_ID (KIP-841).
//
- // Version 3 is the same as vesion 2 (KIP-903).
+ // Version 3 is the same as version 2 (KIP-903).
"validVersions": "0-3",
"flexibleVersions": "0+",
"fields": [
diff --git a/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json
index c63996604e7..dbe69d2f822 100644
--- a/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json
+++ b/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json
@@ -19,8 +19,8 @@
"listeners": ["zkBroker", "broker"],
"name": "ConsumerGroupHeartbeatRequest",
// The ConsumerGroupHeartbeat API is added as part of KIP-848 and is still
- // under developement. Hence, the API is not exposed by default by brokers
- // unless explicitely enabled.
+ // under development. Hence, the API is not exposed by default by brokers
+ // unless explicitly enabled.
"latestVersionUnstable": true,
"validVersions": "0",
"flexibleVersions": "0+",
diff --git a/clients/src/main/resources/common/message/CreateTopicsResponse.json b/clients/src/main/resources/common/message/CreateTopicsResponse.json
index c1bf88285a1..00fa348a31a 100644
--- a/clients/src/main/resources/common/message/CreateTopicsResponse.json
+++ b/clients/src/main/resources/common/message/CreateTopicsResponse.json
@@ -31,7 +31,7 @@
  // Version 6 is identical to version 5 but may return a THROTTLING_QUOTA_EXCEEDED error
// in the response if the topics creation is throttled (KIP-599).
//
-  // Version 7 returns the topic ID of the newly created topic if creation is sucessful.
+  // Version 7 returns the topic ID of the newly created topic if creation is successful.
"validVersions": "0-7",
"flexibleVersions": "5+",
"fields": [
diff --git a/clients/src/main/resources/common/message/FetchRequest.json b/clients/src/main/resources/common/message/FetchRequest.json
index 2a571dba024..295cbf3aa82 100644
--- a/clients/src/main/resources/common/message/FetchRequest.json
+++ b/clients/src/main/resources/common/message/FetchRequest.json
@@ -21,7 +21,7 @@
//
// Version 1 is the same as version 0.
//
- // Starting in Version 2, the requestor must be able to handle Kafka Log
+ // Starting in Version 2, the requester must be able to handle Kafka Log
// Message format version 1.
//
// Version 3 adds MaxBytes. Starting in version 3, the partition ordering in
diff --git a/clients/src/main/resources/common/message/ListOffsetsRequest.json b/clients/src/main/resources/common/message/ListOffsetsRequest.json
index 4e4d07ed49f..0b98e310858 100644
--- a/clients/src/main/resources/common/message/ListOffsetsRequest.json
+++ b/clients/src/main/resources/common/message/ListOffsetsRequest.json
@@ -38,7 +38,7 @@
"flexibleVersions": "6+",
"fields": [
{ "name": "ReplicaId", "type": "int32", "versions": "0+", "entityType":
"brokerId",
- "about": "The broker ID of the requestor, or -1 if this request is being
made by a normal consumer." },
+ "about": "The broker ID of the requester, or -1 if this request is being
made by a normal consumer." },
{ "name": "IsolationLevel", "type": "int8", "versions": "2+",
"about": "This setting controls the visibility of transactional records.
Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With
READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED
transactional records are visible. To be more concrete, READ_COMMITTED returns
all data from offsets smaller than the current LSO (last stable offset), and
enables the inclusion of the list of aborted transactions in the result, which
allows consumers to discard ABO [...]
{ "name": "Topics", "type": "[]ListOffsetsTopic", "versions": "0+",
diff --git a/clients/src/main/resources/common/message/ListOffsetsResponse.json b/clients/src/main/resources/common/message/ListOffsetsResponse.json
index 00a82866005..76177a60a9d 100644
--- a/clients/src/main/resources/common/message/ListOffsetsResponse.json
+++ b/clients/src/main/resources/common/message/ListOffsetsResponse.json
@@ -33,7 +33,7 @@
// Version 7 is the same as version 6 (KIP-734).
//
// Version 8 enables listing offsets by local log start offset.
- // This is the ealiest log start offset in the local log. (KIP-405).
+ // This is the earliest log start offset in the local log. (KIP-405).
"validVersions": "0-8",
"flexibleVersions": "6+",
"fields": [
diff --git a/clients/src/main/resources/common/message/MetadataRequest.json b/clients/src/main/resources/common/message/MetadataRequest.json
index 5da95cfed68..18fb982f651 100644
--- a/clients/src/main/resources/common/message/MetadataRequest.json
+++ b/clients/src/main/resources/common/message/MetadataRequest.json
@@ -23,7 +23,7 @@
"fields": [
    // In version 0, an empty array indicates "request metadata for all topics." In version 1 and
    // higher, an empty array indicates "request metadata for no topics," and a null array is used to
- // indiate "request metadata for all topics."
+ // indicate "request metadata for all topics."
//
// Version 2 and 3 are the same as version 1.
//
diff --git a/clients/src/main/resources/common/message/ProduceRequest.json b/clients/src/main/resources/common/message/ProduceRequest.json
index 90900af5800..96ef8fb3e69 100644
--- a/clients/src/main/resources/common/message/ProduceRequest.json
+++ b/clients/src/main/resources/common/message/ProduceRequest.json
@@ -23,7 +23,7 @@
  // Version 3 adds the transactional ID, which is used for authorization when attempting to write
  // transactional data. Version 3 also adds support for Kafka Message Format v2.
  //
-  // Version 4 is the same as version 3, but the requestor must be prepared to handle a
+  // Version 4 is the same as version 3, but the requester must be prepared to handle a
// KAFKA_STORAGE_ERROR.
//
// Version 5 and 6 are the same as version 3.
diff --git a/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json b/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json
index a832ef7a968..0e7b1875420 100644
--- a/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json
+++ b/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json
@@ -46,7 +46,7 @@
{ "name": "Name", "type": "string", "versions": "0+", "entityType":
"topicName",
"about": "The topic name." },
{ "name": "Partitions", "type": "[]TxnOffsetCommitRequestPartition",
"versions": "0+",
- "about": "The partitions inside the topic that we want to committ
offsets for.", "fields": [
+ "about": "The partitions inside the topic that we want to commit
offsets for.", "fields": [
{ "name": "PartitionIndex", "type": "int32", "versions": "0+",
"about": "The index of the partition within the topic." },
{ "name": "CommittedOffset", "type": "int64", "versions": "0+",
diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java
index f5b85e5e787..bb6b9b48c67 100644
--- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java
@@ -461,7 +461,7 @@ public class NetworkClientTest {
/**
     * This is a helper method that will execute two produce calls. The first call is expected to work and the
-     * second produce call is intentionally made to emulate a request timeout. In the case that a timeout occurrs
+     * second produce call is intentionally made to emulate a request timeout. In the case that a timeout occurs
     * during a request, we want to ensure that we {@link Metadata#requestUpdate() request a metadata update} so that
     * on a subsequent invocation of {@link NetworkClient#poll(long, long) poll}, the metadata request will be sent.
*
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java
index 82a344c1749..e0e0d5b4c0d 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java
@@ -68,7 +68,7 @@ public class DeleteConsumerGroupOffsetsResultTest {
@Test
    public void testPartitionLevelErrorConstructor() throws ExecutionException, InterruptedException {
- createAndVerifyPartitionLevelErrror();
+ createAndVerifyPartitionLevelError();
}
@Test
@@ -86,7 +86,7 @@ public class DeleteConsumerGroupOffsetsResultTest {
@Test
    public void testPartitionMissingInRequestErrorConstructor() throws InterruptedException, ExecutionException {
-        DeleteConsumerGroupOffsetsResult partitionLevelErrorResult = createAndVerifyPartitionLevelErrror();
+        DeleteConsumerGroupOffsetsResult partitionLevelErrorResult = createAndVerifyPartitionLevelError();
        assertThrows(IllegalArgumentException.class, () -> partitionLevelErrorResult.partitionResult(new TopicPartition("invalid-topic", 0)));
}
@@ -104,7 +104,7 @@ public class DeleteConsumerGroupOffsetsResultTest {
assertNull(noErrorResult.partitionResult(tpOne).get());
}
-    private DeleteConsumerGroupOffsetsResult createAndVerifyPartitionLevelErrror() throws InterruptedException, ExecutionException {
+    private DeleteConsumerGroupOffsetsResult createAndVerifyPartitionLevelError() throws InterruptedException, ExecutionException {
partitionFutures.complete(errorsMap);
assertFalse(partitionFutures.isCompletedExceptionally());
DeleteConsumerGroupOffsetsResult partitionLevelErrorResult =
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyTest.java
index e600df47d70..f65b97b4455 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyTest.java
@@ -93,7 +93,7 @@ public class PartitionLeaderStrategyTest {
}
@Test
- public void testUnexpectedTopicErrror() {
+ public void testUnexpectedTopicError() {
TopicPartition topicPartition = new TopicPartition("foo", 0);
        Throwable exception = assertFatalTopicError(topicPartition, Errors.UNKNOWN_SERVER_ERROR);
assertTrue(exception instanceof UnknownServerException);
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
index c2e654a5f0b..2c31e8ca848 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
@@ -2321,7 +2321,7 @@ public class KafkaConsumerTest {
}
@Test
- public void testListOffsetShouldUpateSubscriptions() {
+ public void testListOffsetShouldUpdateSubscriptions() {
final ConsumerMetadata metadata = createMetadata(subscription);
final MockClient client = new MockClient(time, metadata);
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
index 47c9893fd89..03493a12290 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
@@ -458,7 +458,7 @@ public abstract class ConsumerCoordinatorTest {
}
@Test
- public void testSelectRebalanceProtcol() {
+ public void testSelectRebalanceProtocol() {
List<ConsumerPartitionAssignor> assignors = new ArrayList<>();
        assignors.add(new MockPartitionAssignor(Collections.singletonList(ConsumerPartitionAssignor.RebalanceProtocol.EAGER)));
        assignors.add(new MockPartitionAssignor(Collections.singletonList(COOPERATIVE)));
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
index c52c1587803..3850e8b072b 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
@@ -1498,7 +1498,7 @@ public class FetcherTest {
}
@Test
-    public void testFetchDiscardedAfterPausedPartitionResumedAndSeekedToNewOffset() {
+    public void testFetchDiscardedAfterPausedPartitionResumedAndSoughtToNewOffset() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
@@ -1513,7 +1513,7 @@ public class FetcherTest {
        assertTrue(fetcher.hasCompletedFetches(), "Should have 1 entry in completed fetches");
        Fetch<byte[], byte[]> fetch = collectFetch();
-        assertEquals(emptyMap(), fetch.records(), "Should not return any records because we seeked to a new offset");
+        assertEquals(emptyMap(), fetch.records(), "Should not return any records because we sought to a new offset");
        assertFalse(fetch.positionAdvanced());
        assertFalse(fetcher.hasCompletedFetches(), "Should have no completed fetches");
}
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
index 8c7884bd77c..787f85cce53 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
@@ -101,7 +101,7 @@ public class MockProducerTest {
Future<RecordMetadata> md2 = producer.send(record2);
assertFalse(md2.isDone(), "Send shouldn't have completed");
assertTrue(producer.completeNext(), "Complete the first request");
- assertFalse(isError(md1), "Requst should be successful");
+ assertFalse(isError(md1), "Request should be successful");
assertFalse(md2.isDone(), "Second request still incomplete");
IllegalArgumentException e = new IllegalArgumentException("blah");
        assertTrue(producer.errorNext(e), "Complete the second request with an error");
diff --git a/clients/src/test/java/org/apache/kafka/common/feature/FeaturesTest.java b/clients/src/test/java/org/apache/kafka/common/feature/FeaturesTest.java
index 0b2bc4f50a2..dcec3565710 100644
--- a/clients/src/test/java/org/apache/kafka/common/feature/FeaturesTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/feature/FeaturesTest.java
@@ -100,7 +100,7 @@ public class FeaturesTest {
}
@Test
-    public void testSuppportedFeaturesFromMapFailureWithInvalidMissingMaxVersion() {
+    public void testSupportedFeaturesFromMapFailureWithInvalidMissingMaxVersion() {
// This is invalid because 'max_version' key is missing.
Map<String, Map<String, Short>> invalidFeatures = mkMap(
mkEntry("feature_1", mkMap(mkEntry("min_version", (short) 1))));
diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
index b80fafe0dfc..19e3b6a7a37 100644
--- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
@@ -121,8 +121,8 @@ public class MetricsTest {
s.add(metrics.metricName("test.min", "grp1"), new Min());
        s.add(new Meter(TimeUnit.SECONDS, metrics.metricName("test.rate", "grp1"),
                metrics.metricName("test.total", "grp1")));
-        s.add(new Meter(TimeUnit.SECONDS, new WindowedCount(), metrics.metricName("test.occurences", "grp1"),
-                metrics.metricName("test.occurences.total", "grp1")));
+        s.add(new Meter(TimeUnit.SECONDS, new WindowedCount(), metrics.metricName("test.occurrences", "grp1"),
+                metrics.metricName("test.occurrences.total", "grp1")));
s.add(metrics.metricName("test.count", "grp1"), new WindowedCount());
s.add(new Percentiles(100, -100, 100, BucketSizing.CONSTANT,
                new Percentile(metrics.metricName("test.median", "grp1"), 50.0),
@@ -140,7 +140,7 @@ public class MetricsTest {
}
// prior to any time passing
        double elapsedSecs = (config.timeWindowMs() * (config.samples() - 1)) / 1000.0;
-        assertEquals(count / elapsedSecs, metricValueFunc.apply(metrics.metrics().get(metrics.metricName("test.occurences", "grp1"))), EPS,
+        assertEquals(count / elapsedSecs, metricValueFunc.apply(metrics.metrics().get(metrics.metricName("test.occurrences", "grp1"))), EPS,
            String.format("Occurrences(0...%d) = %f", count, count / elapsedSecs));
// pretend 2 seconds passed...
@@ -158,7 +158,7 @@ public class MetricsTest {
"Min(0...9) = 0");
        assertEquals(sum / elapsedSecs, metricValueFunc.apply(metrics.metric(metrics.metricName("test.rate", "grp1"))), EPS,
            "Rate(0...9) = 1.40625");
-        assertEquals(count / elapsedSecs, metricValueFunc.apply(metrics.metric(metrics.metricName("test.occurences", "grp1"))), EPS,
+        assertEquals(count / elapsedSecs, metricValueFunc.apply(metrics.metric(metrics.metricName("test.occurrences", "grp1"))), EPS,
            String.format("Occurrences(0...%d) = %f", count, count / elapsedSecs));
        assertEquals(count, metricValueFunc.apply(metrics.metric(metrics.metricName("test.count", "grp1"))), EPS,
            "Count(0...9) = 10");
diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java
index f0fae567588..d11ca48d9b4 100644
--- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java
+++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java
@@ -80,7 +80,7 @@ public class SslTransportTls12Tls13Test {
        sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.3"));
        sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList(cipherSuite));
- checkAuthentiationFailed();
+ checkAuthenticationFailed();
}
/**
@@ -101,7 +101,7 @@ public class SslTransportTls12Tls13Test {
sslClientConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.3");
        sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList(tls13CipherSuite));
- checkAuthentiationFailed();
+ checkAuthenticationFailed();
}
/**
@@ -138,7 +138,7 @@ public class SslTransportTls12Tls13Test {
}
/** Checks connection failed using the specified {@code tlsVersion}. */
-    private void checkAuthentiationFailed() throws IOException, InterruptedException {
+    private void checkAuthenticationFailed() throws IOException, InterruptedException {
        sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList("TLSv1.3"));
        createSelector(sslClientConfigs);
        InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java
index a633c7db1e3..b85dcd8d51e 100644
--- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java
@@ -355,7 +355,7 @@ public class SaslAuthenticatorTest {
try {
InvalidScramServerCallbackHandler.sensitiveException =
-            new IOException("Could not connect to password database locahost:8000");
+            new IOException("Could not connect to password database localhost:8000");
            createAndCheckClientAuthenticationFailure(securityProtocol, "1", "SCRAM-SHA-256", null);
InvalidScramServerCallbackHandler.sensitiveException =
diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponseTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponseTest.java
index e7204df660a..3b3c90bf1d2 100644
--- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponseTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponseTest.java
@@ -35,7 +35,7 @@ public class OAuthBearerClientInitialResponseTest {
*/
@Test
public void testBuildClientResponseToBytes() throws Exception {
-        String expectedMesssage = "n,,\u0001auth=Bearer 123.345.567\u0001nineteen=42\u0001\u0001";
+        String expectedMessage = "n,,\u0001auth=Bearer 123.345.567\u0001nineteen=42\u0001\u0001";
Map<String, String> extensions = new HashMap<>();
extensions.put("nineteen", "42");
@@ -43,7 +43,7 @@ public class OAuthBearerClientInitialResponseTest {
        String message = new String(response.toBytes(), StandardCharsets.UTF_8);
- assertEquals(expectedMesssage, message);
+ assertEquals(expectedMessage, message);
}
@Test
diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java
index 39bd7b89f15..b2cd0e88914 100644
--- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java
@@ -181,14 +181,14 @@ public class OAuthBearerSaslServerTest {
}
@Test
- public void authorizatonIdEqualsAuthenticationId() throws Exception {
+ public void authorizationIdEqualsAuthenticationId() throws Exception {
byte[] nextChallenge = saslServer
.evaluateResponse(clientInitialResponse(USER));
assertEquals(0, nextChallenge.length, "Next challenge is not empty");
}
@Test
- public void authorizatonIdNotEqualsAuthenticationId() {
+ public void authorizationIdNotEqualsAuthenticationId() {
assertThrows(SaslAuthenticationException.class,
            () -> saslServer.evaluateResponse(clientInitialResponse(USER + "x")));
}
diff --git a/clients/src/test/java/org/apache/kafka/common/security/plain/internals/PlainSaslServerTest.java b/clients/src/test/java/org/apache/kafka/common/security/plain/internals/PlainSaslServerTest.java
index 6c71f51b40e..cb56859ebcf 100644
--- a/clients/src/test/java/org/apache/kafka/common/security/plain/internals/PlainSaslServerTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/security/plain/internals/PlainSaslServerTest.java
@@ -60,13 +60,13 @@ public class PlainSaslServerTest {
}
@Test
- public void authorizatonIdEqualsAuthenticationId() {
+ public void authorizationIdEqualsAuthenticationId() {
        byte[] nextChallenge = saslServer.evaluateResponse(saslMessage(USER_A, USER_A, PASSWORD_A));
assertEquals(0, nextChallenge.length);
}
@Test
- public void authorizatonIdNotEqualsAuthenticationId() {
+ public void authorizationIdNotEqualsAuthenticationId() {
        assertThrows(SaslAuthenticationException.class, () -> saslServer.evaluateResponse(saslMessage(USER_B, USER_A, PASSWORD_A)));
}
diff --git a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java
index f291e2d7d9c..7e83b76f6e1 100644
--- a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java
@@ -58,13 +58,13 @@ public class ScramSaslServerTest {
}
@Test
- public void authorizatonIdEqualsAuthenticationId() throws Exception {
+ public void authorizationIdEqualsAuthenticationId() throws Exception {
        byte[] nextChallenge = saslServer.evaluateResponse(clientFirstMessage(USER_A, USER_A));
assertTrue(nextChallenge.length > 0, "Next challenge is empty");
}
@Test
- public void authorizatonIdNotEqualsAuthenticationId() {
+ public void authorizationIdNotEqualsAuthenticationId() {
        assertThrows(SaslAuthenticationException.class, () -> saslServer.evaluateResponse(clientFirstMessage(USER_A, USER_B)));
}
diff --git a/clients/src/test/java/org/apache/kafka/test/MetricsBench.java b/clients/src/test/java/org/apache/kafka/test/MetricsBench.java
index 93cbf6d2a89..4063d161f96 100644
--- a/clients/src/test/java/org/apache/kafka/test/MetricsBench.java
+++ b/clients/src/test/java/org/apache/kafka/test/MetricsBench.java
@@ -49,8 +49,8 @@ public class MetricsBench {
long start = System.nanoTime();
for (int i = 0; i < iters; i++)
parent.record(i);
-        double ellapsed = (System.nanoTime() - start) / (double) iters;
-        System.out.println(String.format("%.2f ns per metric recording.", ellapsed));
+        double elapsed = (System.nanoTime() - start) / (double) iters;
+        System.out.println(String.format("%.2f ns per metric recording.", elapsed));
} finally {
metrics.close();
}