ijuma commented on code in PR #18632:
URL: https://github.com/apache/kafka/pull/18632#discussion_r1925424534


##########
core/src/main/scala/kafka/server/KafkaApis.scala:
##########
@@ -113,11 +113,8 @@ class KafkaApis(val requestChannel: RequestChannel,
   val requestHelper = new RequestHandlerHelper(requestChannel, quotas, time)
   val aclApis = new AclApis(authHelper, authorizer, requestHelper, "broker", config)
   val configManager = new ConfigAdminManager(brokerId, config, configRepository)
-  val describeTopicPartitionsRequestHandler : Option[DescribeTopicPartitionsRequestHandler] = metadataCache match {
-    case kRaftMetadataCache: KRaftMetadataCache =>
-      Some(new DescribeTopicPartitionsRequestHandler(kRaftMetadataCache, authHelper, config))
-    case _ => None
-  }
+  val describeTopicPartitionsRequestHandler : DescribeTopicPartitionsRequestHandler = new DescribeTopicPartitionsRequestHandler(

Review Comment:
   This is an existing issue, but since you're touching this file, remove the ` : DescribeTopicPartitionsRequestHandler` (it's redundant).



##########
core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala:
##########
@@ -272,7 +272,7 @@ class KRaftMetadataCache(
    * @param maximumNumberOfPartitions     The max number of partitions to return.
    * @param ignoreTopicsWithExceptions    Whether ignore the topics with exception.
    */

Review Comment:
   Move this scaladoc to the interface.
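   
   Roughly what that could look like on the interface side (the trait and method names below are illustrative assumptions, not taken from the PR):
   
   ```scala
   // Hypothetical sketch: host the scaladoc on the interface method so the
   // KRaft implementation inherits it instead of duplicating it.
   trait MetadataCacheSketch {
     /**
      * @param maximumNumberOfPartitions     The max number of partitions to return.
      * @param ignoreTopicsWithExceptions    Whether ignore the topics with exception.
      */
     def describeTopicPartitions(maximumNumberOfPartitions: Int,
                                 ignoreTopicsWithExceptions: Boolean): Unit
   }
   
   class KRaftMetadataCacheSketch extends MetadataCacheSketch {
     // No scaladoc needed here; it is inherited from the trait.
     override def describeTopicPartitions(maximumNumberOfPartitions: Int,
                                          ignoreTopicsWithExceptions: Boolean): Unit = ()
   }
   ```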



##########
core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java:
##########
@@ -387,8 +388,8 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() {
                 .setPartitionEpoch(2)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value())
         );
-        KRaftMetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1);
-        updateKraftMetadataCache(metadataCache, records);
+        MetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1);
+        updateKraftMetadataCache((KRaftMetadataCache) metadataCache, records);

Review Comment:
   Same as the comment above.



##########
core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java:
##########
@@ -190,8 +191,8 @@ void testDescribeTopicPartitionsRequest() {
                 .setPartitionEpoch(2)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value())
         );
-        KRaftMetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1);
-        updateKraftMetadataCache(metadataCache, records);
+        MetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1);
+        updateKraftMetadataCache((KRaftMetadataCache) metadataCache, records);

Review Comment:
   Both of these changes seem to make things worse.



##########
core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala:
##########
@@ -348,6 +348,7 @@ class PartitionLockTest extends Logging {
     val controllerEpoch = 0
     val replicas = (0 to numReplicaFetchers).map(i => Integer.valueOf(brokerId + i)).toList.asJava
     val isr = replicas
+    replicas.forEach(replicaId => when(metadataCache.getAliveBrokerEpoch(replicaId)).thenReturn(Some(1L)))

Review Comment:
   Why do we need this extra code?



##########
core/src/test/scala/unit/kafka/cluster/PartitionTest.scala:
##########
@@ -186,6 +186,7 @@ class PartitionTest extends AbstractPartitionTest {
     val leaderEpoch = 10
     val logStartOffset = 0L
     val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true)
+    addBrokerEpochToMockMetadataCache(metadataCache, List(remoteReplicaId))

Review Comment:
   Hmm, why are these changes needed?



##########
core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala:
##########
@@ -793,6 +793,7 @@ class ReplicaManagerTest {
 
     try {
       val brokerList = Seq[Integer](0, 1).asJava
+      when(replicaManager.metadataCache.getAliveBrokerEpoch(1)).thenReturn(Some(brokerEpoch))

Review Comment:
   Why is this change needed?


