This is an automated email from the ASF dual-hosted git repository.
ijuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
new 93df7f5485e MINOR: Remove unused variables and other minor clean-ups (#12952)
93df7f5485e is described below
commit 93df7f5485e8118a32341c722542fed54ce141fa
Author: Ismael Juma <[email protected]>
AuthorDate: Sun Dec 4 20:52:33 2022 -0800
MINOR: Remove unused variables and other minor clean-ups (#12952)
* Remove whitespace before package declaration
* Avoid unnecessary postfix language usage
Reviewers: Luke Chen <[email protected]>
---
.../kafka/admin/BrokerApiVersionsCommand.scala | 2 --
.../scala/kafka/admin/ConsumerGroupCommand.scala | 2 +-
.../main/scala/kafka/admin/FeatureCommand.scala | 29 +++++++++-------------
.../main/scala/kafka/network/RequestChannel.scala | 2 +-
.../main/scala/kafka/utils/CommandLineUtils.scala | 2 +-
.../scala/kafka/raft/KafkaMetadataLogTest.scala | 2 +-
.../kafka/server/ServerGenerateClusterIdTest.scala | 4 +--
7 files changed, 18 insertions(+), 25 deletions(-)
diff --git a/core/src/main/scala/kafka/admin/BrokerApiVersionsCommand.scala b/core/src/main/scala/kafka/admin/BrokerApiVersionsCommand.scala
index 957cb2ce8bb..ea6ffeea65b 100644
--- a/core/src/main/scala/kafka/admin/BrokerApiVersionsCommand.scala
+++ b/core/src/main/scala/kafka/admin/BrokerApiVersionsCommand.scala
@@ -203,8 +203,6 @@ object BrokerApiVersionsCommand {
private object AdminClient {
val DefaultConnectionMaxIdleMs = 9 * 60 * 1000
val DefaultRequestTimeoutMs = 5000
- val DefaultSocketConnectionSetupMs = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG
- val DefaultSocketConnectionSetupMaxMs = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG
val DefaultMaxInFlightRequestsPerConnection = 100
val DefaultReconnectBackoffMs = 50
val DefaultReconnectBackoffMax = 50
diff --git a/core/src/main/scala/kafka/admin/ConsumerGroupCommand.scala b/core/src/main/scala/kafka/admin/ConsumerGroupCommand.scala
index f3b67a030f9..b428c447d77 100755
--- a/core/src/main/scala/kafka/admin/ConsumerGroupCommand.scala
+++ b/core/src/main/scala/kafka/admin/ConsumerGroupCommand.scala
@@ -214,7 +214,7 @@ object ConsumerGroupCommand extends Logging {
private def printGroupStates(groupsAndStates: List[(String, String)]): Unit = {
// find proper columns width
var maxGroupLen = 15
- for ((groupId, state) <- groupsAndStates) {
+ for ((groupId, _) <- groupsAndStates) {
maxGroupLen = Math.max(maxGroupLen, groupId.length)
}
println(s"%${-maxGroupLen}s %s".format("GROUP", "STATE"))
diff --git a/core/src/main/scala/kafka/admin/FeatureCommand.scala b/core/src/main/scala/kafka/admin/FeatureCommand.scala
index 3ed99022ed1..26228c652c8 100644
--- a/core/src/main/scala/kafka/admin/FeatureCommand.scala
+++ b/core/src/main/scala/kafka/admin/FeatureCommand.scala
@@ -155,15 +155,11 @@ object FeatureCommand {
feature: String,
level: Short
): String = {
- if (feature.equals(MetadataVersion.FEATURE_NAME)) {
- try {
- MetadataVersion.fromFeatureLevel(level).version()
- } catch {
- case e: Throwable => s"UNKNOWN [${level}]"
- }
- } else {
+ if (feature.equals(MetadataVersion.FEATURE_NAME))
+ try MetadataVersion.fromFeatureLevel(level).version
+ catch { case _: Throwable => s"UNKNOWN [$level]"}
+ else
level.toString
- }
}
def handleDescribe(
@@ -172,8 +168,7 @@ object FeatureCommand {
): Unit = {
val featureMetadata = admin.describeFeatures().featureMetadata().get()
val featureList = new java.util.TreeSet[String](featureMetadata.supportedFeatures().keySet())
- featureList.forEach {
- case feature =>
+ featureList.forEach { feature =>
val finalizedLevel = featureMetadata.finalizedFeatures().asScala.get(feature) match {
case None => 0.toShort
case Some(v) => v.maxVersionLevel()
@@ -218,12 +213,12 @@ object FeatureCommand {
}
val name = input.substring(0, equalsIndex).trim
val levelString = input.substring(equalsIndex + 1).trim
- val level = try {
- levelString.toShort
- } catch {
- case e: Throwable => throw new TerseFailure(s"Can't parse feature=level string ${input}: " +
- s"unable to parse ${levelString} as a short.")
- }
+ val level =
+ try levelString.toShort
+ catch {
+ case _: Throwable => throw new TerseFailure(s"Can't parse feature=level string ${input}: " +
+ s"unable to parse ${levelString} as a short.")
+ }
(name, level)
}
@@ -239,7 +234,7 @@ object FeatureCommand {
val version = try {
MetadataVersion.fromVersionString(metadata)
} catch {
- case e: Throwable => throw new TerseFailure("Unsupported metadata version " + metadata +
+ case _: Throwable => throw new TerseFailure("Unsupported metadata version " + metadata +
". Supported metadata versions are " + metadataVersionsToString(
MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, MetadataVersion.latest()))
}
diff --git a/core/src/main/scala/kafka/network/RequestChannel.scala b/core/src/main/scala/kafka/network/RequestChannel.scala
index 76bb25e9237..8686d15a1d3 100644
--- a/core/src/main/scala/kafka/network/RequestChannel.scala
+++ b/core/src/main/scala/kafka/network/RequestChannel.scala
@@ -123,7 +123,7 @@ object RequestChannel extends Logging {
private def shouldReturnNotController(response: AbstractResponse): Boolean = {
response match {
- case describeQuorumResponse: DescribeQuorumResponse => response.errorCounts.containsKey(Errors.NOT_LEADER_OR_FOLLOWER)
+ case _: DescribeQuorumResponse => response.errorCounts.containsKey(Errors.NOT_LEADER_OR_FOLLOWER)
case _ => response.errorCounts.containsKey(Errors.NOT_CONTROLLER)
}
}
diff --git a/core/src/main/scala/kafka/utils/CommandLineUtils.scala b/core/src/main/scala/kafka/utils/CommandLineUtils.scala
index 0aa233babcd..e9dcee0eb18 100644
--- a/core/src/main/scala/kafka/utils/CommandLineUtils.scala
+++ b/core/src/main/scala/kafka/utils/CommandLineUtils.scala
@@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
- package kafka.utils
+package kafka.utils
import java.util.Properties
diff --git a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala
index fa9885e3583..7cc63f0e0a0 100644
--- a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala
+++ b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala
@@ -499,7 +499,7 @@ final class KafkaMetadataLogTest {
@Test
def testCleanupPartialSnapshots(): Unit = {
- val (logDir, log, config) = buildMetadataLogAndDir(tempDir, mockTime)
+ val (logDir, log, _) = buildMetadataLogAndDir(tempDir, mockTime)
val numberOfRecords = 10
val epoch = 1
val snapshotId = new OffsetAndEpoch(1, epoch)
diff --git a/core/src/test/scala/unit/kafka/server/ServerGenerateClusterIdTest.scala b/core/src/test/scala/unit/kafka/server/ServerGenerateClusterIdTest.scala
index d2a6df5dda3..8a7957d90c9 100755
--- a/core/src/test/scala/unit/kafka/server/ServerGenerateClusterIdTest.scala
+++ b/core/src/test/scala/unit/kafka/server/ServerGenerateClusterIdTest.scala
@@ -122,7 +122,7 @@ class ServerGenerateClusterIdTest extends QuorumTestHarness {
@Test
def testAutoGenerateClusterIdForKafkaClusterParallel(): Unit = {
val firstBoot = Future.traverse(Seq(config1, config2, config3))(config => Future(TestUtils.createServer(config, threadNamePrefix = Option(this.getClass.getName))))
- servers = Await.result(firstBoot, 100 second)
+ servers = Await.result(firstBoot, 100.second)
val Seq(server1, server2, server3) = servers
val clusterIdFromServer1 = server1.clusterId
@@ -138,7 +138,7 @@ class ServerGenerateClusterIdTest extends QuorumTestHarness {
server.startup()
server
})
- servers = Await.result(secondBoot, 100 second)
+ servers = Await.result(secondBoot, 100.second)
servers.foreach(server => assertEquals(clusterIdFromServer1, server.clusterId))
servers.foreach(_.shutdown())