This is an automated email from the ASF dual-hosted git repository.

mjsax pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 9b468fb2787 MINOR: Do not end Javadoc comments with `**/` (#14540)
9b468fb2787 is described below

commit 9b468fb278701be836a2641650356907bf84860a
Author: Matthias J. Sax <[email protected]>
AuthorDate: Tue Oct 17 21:11:04 2023 -0700

    MINOR: Do not end Javadoc comments with `**/` (#14540)
    
    Reviewers: Bruno Cadonna <[email protected]>, Bill Bejeck 
<[email protected]>, Hao Li <[email protected]>, Josep Prat 
<[email protected]>
---
 .../org/apache/kafka/clients/admin/KafkaAdminClient.java   |  2 +-
 .../org/apache/kafka/clients/consumer/KafkaConsumer.java   |  2 +-
 .../clients/consumer/internals/CommitRequestManager.java   |  2 +-
 .../clients/consumer/internals/FetchMetricsRegistry.java   |  6 +++---
 .../clients/consumer/internals/OffsetsRequestManager.java  |  4 ++--
 .../clients/producer/internals/SenderMetricsRegistry.java  |  6 +++---
 .../java/org/apache/kafka/common/protocol/ApiKeys.java     |  2 +-
 .../apache/kafka/connect/health/ConnectClusterDetails.java |  2 +-
 .../apache/kafka/connect/health/ConnectClusterState.java   |  2 +-
 .../apache/kafka/connect/transforms/Transformation.java    |  4 ++--
 .../kafka/connect/runtime/ConnectMetricsRegistry.java      | 14 +++++++-------
 .../org/apache/kafka/connect/runtime/WorkerSinkTask.java   |  2 +-
 .../test/java/kafka/test/junit/ClusterTestExtensions.java  |  2 +-
 .../java/org/apache/kafka/jmh/util/ByteUtilsBenchmark.java |  2 +-
 .../streams/processor/internals/StateDirectoryTest.java    |  4 +---
 .../processor/internals/StreamsAssignmentScaleTest.java    |  6 +++---
 .../java/org/apache/kafka/tools/VerifiableProducer.java    |  2 +-
 17 files changed, 31 insertions(+), 33 deletions(-)

diff --git 
a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java 
b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
index 27d28b5b336..7a7873cd7cc 100644
--- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
+++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
@@ -1303,7 +1303,7 @@ public class KafkaAdminClient extends AdminClient {
          *
          * @param now                   The current time in milliseconds.
          * @param responses             The latest responses from KafkaClient.
-         **/
+         */
         private void handleResponses(long now, List<ClientResponse> responses) 
{
             for (ClientResponse response : responses) {
                 int correlationId = response.requestHeader().correlationId();
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index aa4b913b493..f9e057c2f61 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -2237,7 +2237,7 @@ public class KafkaConsumer<K, V> implements Consumer<K, 
V> {
      * @return This {@code Consumer} instance's current lag for the given 
partition.
      *
      * @throws IllegalStateException if the {@code topicPartition} is not 
assigned
-     **/
+     */
     @Override
     public OptionalLong currentLag(TopicPartition topicPartition) {
         acquireAndEnsureOpen();
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
index 64fc41d3f9e..060845916bb 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
@@ -445,7 +445,7 @@ public class CommitRequestManager implements RequestManager 
{
          * {@code inflightOffsetFetches} to bookkeep all the inflight requests.
          * Note: Sendable requests are determined by their timer as we are 
expecting backoff on failed attempt. See
          * {@link RequestState}.
-         **/
+         */
         List<NetworkClientDelegate.UnsentRequest> drain(final long 
currentTimeMs) {
             List<NetworkClientDelegate.UnsentRequest> unsentRequests = new 
ArrayList<>();
 
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java
index fc4ac1d665e..b6b269a87e8 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java
@@ -66,7 +66,7 @@ public class FetchMetricsRegistry {
 
     public FetchMetricsRegistry(Set<String> tags, String metricGrpPrefix) {
 
-        /***** Client level *****/
+        /* Client level */
         String groupName = metricGrpPrefix + "-fetch-manager-metrics";
 
         this.fetchSizeAvg = new MetricNameTemplate("fetch-size-avg", groupName,
@@ -105,7 +105,7 @@ public class FetchMetricsRegistry {
         this.fetchThrottleTimeMax = new 
MetricNameTemplate("fetch-throttle-time-max", groupName,
                 "The maximum throttle time in ms", tags);
 
-        /***** Topic level *****/
+        /* Topic level */
         Set<String> topicTags = new LinkedHashSet<>(tags);
         topicTags.add("topic");
 
@@ -125,7 +125,7 @@ public class FetchMetricsRegistry {
         this.topicRecordsConsumedTotal = new 
MetricNameTemplate("records-consumed-total", groupName,
                 "The total number of records consumed for a topic", topicTags);
 
-        /***** Partition level *****/
+        /* Partition level */
         Set<String> partitionTags = new HashSet<>(topicTags);
         partitionTags.add("partition");
         this.partitionRecordsLag = new MetricNameTemplate("records-lag", 
groupName,
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
index 0e333618255..a1cb8d54a97 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
@@ -325,7 +325,7 @@ public class OffsetsRequestManager implements 
RequestManager, ClusterResourceLis
     /**
      * Build ListOffsets request to send to a specific broker for the 
partitions and
      * target timestamps. This also adds the request to the list of 
unsentRequests.
-     **/
+     */
     private CompletableFuture<ListOffsetResult> buildListOffsetRequestToNode(
             Node node,
             Map<TopicPartition, ListOffsetsRequestData.ListOffsetsPartition> 
targetTimes,
@@ -477,7 +477,7 @@ public class OffsetsRequestManager implements 
RequestManager, ClusterResourceLis
     /**
      * Build OffsetsForLeaderEpoch request to send to a specific broker for 
the partitions and
      * positions to fetch. This also adds the request to the list of 
unsentRequests.
-     **/
+     */
     private CompletableFuture<OffsetsForLeaderEpochUtils.OffsetForEpochResult> 
buildOffsetsForLeaderEpochRequestToNode(
             final Node node,
             final Map<TopicPartition, SubscriptionState.FetchPosition> 
fetchPositions,
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/internals/SenderMetricsRegistry.java
 
b/clients/src/main/java/org/apache/kafka/clients/producer/internals/SenderMetricsRegistry.java
index 2ad2cba09e8..f13d13c5d81 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/producer/internals/SenderMetricsRegistry.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/internals/SenderMetricsRegistry.java
@@ -76,7 +76,7 @@ public class SenderMetricsRegistry {
         this.tags = this.metrics.config().tags().keySet();
         this.allTemplates = new ArrayList<>();
         
-        /***** Client level *****/
+        /* Client level */
         
         this.batchSizeAvg = createMetricName("batch-size-avg",
                 "The average number of bytes sent per partition per-request.");
@@ -125,7 +125,7 @@ public class SenderMetricsRegistry {
         this.produceThrottleTimeMax = 
createMetricName("produce-throttle-time-max",
                 "The maximum time in ms a request was throttled by a broker");
 
-        /***** Topic level *****/
+        /* Topic level */
         this.topicTags = new LinkedHashSet<>(tags);
         this.topicTags.add("topic");
 
@@ -160,7 +160,7 @@ public class SenderMetricsRegistry {
         return createTemplate(name, TOPIC_METRIC_GROUP_NAME, description, 
this.topicTags);
     }
 
-    /** topic level metrics **/
+    /* topic level metrics */
     public MetricName topicRecordSendRate(Map<String, String> tags) {
         return this.metrics.metricInstance(this.topicRecordSendRate, tags);
     }
diff --git 
a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java 
b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java
index 64bbe1557c4..d98a48b6160 100644
--- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java
+++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java
@@ -140,7 +140,7 @@ public enum ApiKeys {
     /** indicates the minimum required inter broker magic required to support 
the API */
     public final byte minRequiredInterBrokerMagic;
 
-    /** indicates whether the API is enabled for forwarding **/
+    /** indicates whether the API is enabled for forwarding */
     public final boolean forwardable;
 
     public final boolean requiresDelayedAllocation;
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterDetails.java
 
b/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterDetails.java
index edde6ff657a..ced9122ae6c 100644
--- 
a/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterDetails.java
+++ 
b/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterDetails.java
@@ -27,6 +27,6 @@ public interface ConnectClusterDetails {
      * Get the cluster ID of the Kafka cluster backing this Connect cluster.
      * 
      * @return the cluster ID of the Kafka cluster backing this Connect cluster
-     **/
+     */
     String kafkaClusterId();
 }
\ No newline at end of file
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterState.java
 
b/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterState.java
index 753ee1a7613..e42564ca457 100644
--- 
a/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterState.java
+++ 
b/connect/api/src/main/java/org/apache/kafka/connect/health/ConnectClusterState.java
@@ -65,7 +65,7 @@ public interface ConnectClusterState {
      * Get details about the setup of the Connect cluster.
      * @return a {@link ConnectClusterDetails} object containing information 
about the cluster
      * @throws java.lang.UnsupportedOperationException if the default 
implementation has not been overridden
-     **/
+     */
     default ConnectClusterDetails clusterDetails() {
         throw new UnsupportedOperationException();
     }
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java
 
b/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java
index 87a5a033f24..d5e42ebe8bc 100644
--- 
a/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java
+++ 
b/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java
@@ -50,10 +50,10 @@ public interface Transformation<R extends ConnectRecord<R>> 
extends Configurable
      */
     R apply(R record);
 
-    /** Configuration specification for this transformation. **/
+    /** Configuration specification for this transformation. */
     ConfigDef config();
 
-    /** Signal that this transformation instance will no longer will be used. 
**/
+    /** Signal that this transformation instance will no longer will be used. 
*/
     @Override
     void close();
 
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java
index 2a4fe6ae716..1d144440f2c 100644
--- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java
@@ -123,7 +123,7 @@ public class ConnectMetricsRegistry {
     }
 
     public ConnectMetricsRegistry(Set<String> tags) {
-        /***** Connector level *****/
+        /* Connector level */
         Set<String> connectorTags = new LinkedHashSet<>(tags);
         connectorTags.add(CONNECTOR_TAG_NAME);
 
@@ -137,7 +137,7 @@ public class ConnectMetricsRegistry {
         connectorVersion = createTemplate("connector-version", 
CONNECTOR_GROUP_NAME,
                                           "The version of the connector class, 
as reported by the connector.", connectorTags);
 
-        /***** Worker task level *****/
+        /* Worker task level */
         Set<String> workerTaskTags = new LinkedHashSet<>(tags);
         workerTaskTags.add(CONNECTOR_TAG_NAME);
         workerTaskTags.add(TASK_TAG_NAME);
@@ -165,7 +165,7 @@ public class ConnectMetricsRegistry {
                                                      "The average percentage 
of this task's offset commit attempts that succeeded.",
                                                      workerTaskTags);
 
-        /***** Source worker task level *****/
+        /* Source worker task level */
         Set<String> sourceTaskTags = new LinkedHashSet<>(tags);
         sourceTaskTags.add(CONNECTOR_TAG_NAME);
         sourceTaskTags.add(TASK_TAG_NAME);
@@ -219,7 +219,7 @@ public class ConnectMetricsRegistry {
                                             "The average number of records in 
the transactions the task has committed so far.",
                                             sourceTaskTags);
 
-        /***** Sink worker task level *****/
+        /* Sink worker task level */
         Set<String> sinkTaskTags = new LinkedHashSet<>(tags);
         sinkTaskTags.add(CONNECTOR_TAG_NAME);
         sinkTaskTags.add(TASK_TAG_NAME);
@@ -286,7 +286,7 @@ public class ConnectMetricsRegistry {
                                                   + 
"committed/flushed/acknowledged by the sink task.",
                                                   sinkTaskTags);
 
-        /***** Worker level *****/
+        /* Worker level */
         Set<String> workerTags = new LinkedHashSet<>(tags);
 
         connectorCount = createTemplate("connector-count", WORKER_GROUP_NAME, 
"The number of connectors run in this worker.", workerTags);
@@ -341,7 +341,7 @@ public class ConnectMetricsRegistry {
         connectorStatusMetrics.put(connectorRestartingTaskCount, 
TaskStatus.State.RESTARTING);
         connectorStatusMetrics = 
Collections.unmodifiableMap(connectorStatusMetrics);
 
-        /***** Worker rebalance level *****/
+        /* Worker rebalance level */
         Set<String> rebalanceTags = new LinkedHashSet<>(tags);
 
         connectProtocol = createTemplate("connect-protocol", 
WORKER_REBALANCE_GROUP_NAME, "The Connect protocol used by this cluster", 
rebalanceTags);
@@ -358,7 +358,7 @@ public class ConnectMetricsRegistry {
         rebalanceTimeSinceLast = 
createTemplate("time-since-last-rebalance-ms", WORKER_REBALANCE_GROUP_NAME,
                                                 "The time in milliseconds 
since this worker completed the most recent rebalance.", rebalanceTags);
 
-        /***** Task Error Handling Metrics *****/
+        /* Task Error Handling Metrics */
         Set<String> taskErrorHandlingTags = new LinkedHashSet<>(tags);
         taskErrorHandlingTags.add(CONNECTOR_TAG_NAME);
         taskErrorHandlingTags.add(TASK_TAG_NAME);
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
index 7b514016c37..4d400260081 100644
--- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
@@ -370,7 +370,7 @@ class WorkerSinkTask extends WorkerTask {
     /**
      * Starts an offset commit by flushing outstanding messages from the task 
and then starting
      * the write commit.
-     **/
+     */
     private void doCommit(Map<TopicPartition, OffsetAndMetadata> offsets, 
boolean closing, int seqno) {
         if (isCancelled()) {
             log.debug("Skipping final offset commit as task has been 
cancelled");
diff --git a/core/src/test/java/kafka/test/junit/ClusterTestExtensions.java 
b/core/src/test/java/kafka/test/junit/ClusterTestExtensions.java
index bd69109c4b7..50e2a063649 100644
--- a/core/src/test/java/kafka/test/junit/ClusterTestExtensions.java
+++ b/core/src/test/java/kafka/test/junit/ClusterTestExtensions.java
@@ -74,7 +74,7 @@ import java.util.stream.Stream;
  * will generate two invocations of "someTest" (since ClusterType.Both was 
given). For each invocation, the test class
  * SomeIntegrationTest will be instantiated, lifecycle methods (before/after) 
will be run, and "someTest" will be invoked.
  *
- **/
+ */
 public class ClusterTestExtensions implements 
TestTemplateInvocationContextProvider {
     @Override
     public boolean supportsTestTemplate(ExtensionContext context) {
diff --git 
a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/util/ByteUtilsBenchmark.java
 
b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/util/ByteUtilsBenchmark.java
index 29b99d542c4..88c867a953f 100644
--- 
a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/util/ByteUtilsBenchmark.java
+++ 
b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/util/ByteUtilsBenchmark.java
@@ -293,7 +293,7 @@ public class ByteUtilsBenchmark {
     }
 
 
-    /******************* Implementations **********************/
+    /* Implementations */
 
     /*
      * Implementation in Trunk as of Apr 2023 / v3.4
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
index c387823fe03..f0edf962154 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
@@ -639,7 +639,7 @@ public class StateDirectoryTest {
         }
     }
 
-    /************* Named Topology Tests *************/
+    /* Named Topology Tests */
 
     @Test
     public void shouldCreateTaskDirectoriesUnderNamedTopologyDirs() throws 
IOException {
@@ -767,8 +767,6 @@ public class StateDirectoryTest {
         }
     }
 
-    /************************************************/
-
     @Test
     public void shouldPersistProcessIdAcrossRestart() {
         final UUID processId = directory.initializeProcessId();
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsAssignmentScaleTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsAssignmentScaleTest.java
index 9360206c35e..834b6242194 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsAssignmentScaleTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsAssignmentScaleTest.java
@@ -73,7 +73,7 @@ public class StreamsAssignmentScaleTest {
 
     private final Logger log = 
LoggerFactory.getLogger(StreamsAssignmentScaleTest.class);
 
-    /************ HighAvailabilityTaskAssignor tests ************/
+    /* HighAvailabilityTaskAssignor tests */
 
     @Test(timeout = 120 * 1000)
     public void testHighAvailabilityTaskAssignorLargePartitionCount() {
@@ -95,7 +95,7 @@ public class StreamsAssignmentScaleTest {
         completeLargeAssignment(1_000, 10, 1000, 1, 
HighAvailabilityTaskAssignor.class);
     }
 
-    /************ StickyTaskAssignor tests ************/
+    /* StickyTaskAssignor tests */
 
     @Test(timeout = 120 * 1000)
     public void testStickyTaskAssignorLargePartitionCount() {
@@ -117,7 +117,7 @@ public class StreamsAssignmentScaleTest {
         completeLargeAssignment(1_000, 10, 1000, 1, StickyTaskAssignor.class);
     }
 
-    /************ FallbackPriorTaskAssignor tests ************/
+    /* FallbackPriorTaskAssignor tests */
 
     @Test(timeout = 120 * 1000)
     public void testFallbackPriorTaskAssignorLargePartitionCount() {
diff --git a/tools/src/main/java/org/apache/kafka/tools/VerifiableProducer.java 
b/tools/src/main/java/org/apache/kafka/tools/VerifiableProducer.java
index bda15f8306d..fa4eff42c44 100644
--- a/tools/src/main/java/org/apache/kafka/tools/VerifiableProducer.java
+++ b/tools/src/main/java/org/apache/kafka/tools/VerifiableProducer.java
@@ -290,7 +290,7 @@ public class VerifiableProducer implements AutoCloseable {
         }
     }
 
-    /** Returns a string to publish: ether 'valuePrefix'.'val' or 'val' **/
+    /** Returns a string to publish: either 'valuePrefix'.'val' or 'val' */
     public String getValue(long val) {
         if (this.valuePrefix != null) {
             return String.format("%d.%d", this.valuePrefix, val);

Reply via email to