FrankYang0529 commented on code in PR #17958:
URL: https://github.com/apache/kafka/pull/17958#discussion_r1863713283
##########
core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala:
##########
@@ -213,4 +216,111 @@ class ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCo
       admin.close()
     }
   }
+
+  @ClusterTest(
+    types = Array(Type.KRAFT),
+    serverProperties = Array(
+      new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"),
+      new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1")
+    )
+  )
+  def testConsumerGroupDescribeWithMigrationMember(): Unit = {
+    // Create the __consumer_offsets topic because it is not created automatically
+    // in this test, which does not use the FindCoordinator API.
+    createOffsetsTopic()
+
+    // Create the topic.
+    val topicName = "foo"
+    createTopic(
+      topic = "foo",
+      numPartitions = 3
+    )
+
+    val groupId = "grp"
+
+    // Classic member 1 joins the classic group.
+    val memberId1 = joinDynamicConsumerGroupWithOldProtocol(
+      groupId = groupId,
+      metadata = ConsumerProtocol.serializeSubscription(
+        new ConsumerPartitionAssignor.Subscription(
+          Collections.singletonList(topicName),
+          null,
+          List().asJava
+        )
+      ).array,
+      assignment = ConsumerProtocol.serializeAssignment(
+        new ConsumerPartitionAssignor.Assignment(
+          List(0, 1, 2).map(p => new TopicPartition(topicName, p)).asJava
+        )
+      ).array
+    )._1
+
+    // The join request from consumer group member 2 is accepted.
+    consumerGroupHeartbeat(
+      groupId = groupId,
+      memberId = "member-2",
+      rebalanceTimeoutMs = 5 * 60 * 1000,
+      subscribedTopicNames = List(topicName),
+      topicPartitions = List.empty,
+      expectedError = Errors.NONE
+    ).memberId()
+
+    // Version 0 does not have the isClassic field, so isClassic on member 1 is false.
+    val actual = consumerGroupDescribe(
+      groupIds = List(groupId),
+      includeAuthorizedOperations = true,
+      version = 0.toShort,
+    )
+    assertEquals(1, actual.size)
+    val group = actual.head
+    val member1 = group.members().asScala.find(_.memberId() == memberId1)
+    assertNotNull(member1)
+    assertFalse(member1.get.isClassic)
+
+    // Starting from version 1, the isClassic field is present in ConsumerGroupDescribeResponse.
+    for (version <- 1.toShort to ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) {
+      val actual = consumerGroupDescribe(
+        groupIds = List(groupId),
+        includeAuthorizedOperations = true,
+        version = version.toShort,
+      )
+      assertEquals(1, actual.size)
+      val group = actual.head
+      val member1 = group.members().asScala.find(_.memberId() == memberId1)
+      assertNotNull(member1)
+      assertTrue(member1.get.isClassic)
+    }

Review Comment:
   Yes, updated it. Thanks for the suggestion.
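For context on the version-gated assertions in the hunk quoted above: a response field that was introduced in a later RPC version decodes to its default value when an older version is used, which is why the v0 describe reports isClassic as false while v1+ report the real value. Below is a minimal, self-contained sketch of that idea, assuming nothing beyond plain Scala; VersionGateSketch, isClassicSinceVersion, and expectedIsClassic are illustrative names, not Kafka APIs.

```scala
// Minimal sketch of the version-gating idea used in the quoted test: a response
// field added in a later RPC version falls back to its default value when the
// request was sent with an older version. All names here are illustrative only.
object VersionGateSketch {

  // Hypothetical version that introduces the isClassic field (v1 in the test above).
  val isClassicSinceVersion: Short = 1

  // What a describe at `requestVersion` is expected to report for a member whose
  // real classic-protocol status is `actuallyClassic`.
  def expectedIsClassic(requestVersion: Short, actuallyClassic: Boolean): Boolean =
    if (requestVersion < isClassicSinceVersion) false // field absent, default value
    else actuallyClassic

  def main(args: Array[String]): Unit = {
    assert(!expectedIsClassic(requestVersion = 0, actuallyClassic = true)) // v0: default false
    assert(expectedIsClassic(requestVersion = 1, actuallyClassic = true))  // v1+: real value
    println("version-gating sketch holds")
  }
}
```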
+ val topicName = "foo" + createTopic( + topic = "foo", + numPartitions = 3 + ) + + val groupId = "grp" + + // Classic member 1 joins the classic group. + val memberId1 = joinDynamicConsumerGroupWithOldProtocol( + groupId = groupId, + metadata = ConsumerProtocol.serializeSubscription( + new ConsumerPartitionAssignor.Subscription( + Collections.singletonList(topicName), + null, + List().asJava + ) + ).array, + assignment = ConsumerProtocol.serializeAssignment( + new ConsumerPartitionAssignor.Assignment( + List(0, 1, 2).map(p => new TopicPartition(topicName, p)).asJava + ) + ).array + )._1 + + // The joining request with a consumer group member 2 is accepted. + consumerGroupHeartbeat( + groupId = groupId, + memberId = "member-2", + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List(topicName), + topicPartitions = List.empty, + expectedError = Errors.NONE + ).memberId() + + // version 0 doesn't have isClassic field, so isClassic field on member 1 is false + val actual = consumerGroupDescribe( + groupIds = List(groupId), + includeAuthorizedOperations = true, + version = 0.toShort, + ) + assertEquals(1, actual.size) + val group = actual.head + val member1 = group.members().asScala.find(_.memberId() == memberId1) + assertNotNull(member1) + assertFalse(member1.get.isClassic) + + // starting from version 1, there is isClassic field in ConsumerGroupDescribeResponse + for (version <- 1.toShort to ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { + val actual = consumerGroupDescribe( + groupIds = List(groupId), + includeAuthorizedOperations = true, + version = version.toShort, + ) + assertEquals(1, actual.size) + val group = actual.head + val member1 = group.members().asScala.find(_.memberId() == memberId1) + assertNotNull(member1) + assertTrue(member1.get.isClassic) + } + + // Classic member 1 leaves group. + leaveGroup( + groupId = groupId, + memberId = memberId1, + useNewProtocol = false, + version = ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled) + ) + + // member 1 joins as consumer group member + consumerGroupHeartbeat( + groupId = groupId, + memberId = memberId1, + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty, + expectedError = Errors.NONE + ) + + // there is no classic member in the group Review Comment: Updated it. Thanks for the reminder. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org