AndrewJSchofield commented on code in PR #18455:
URL: https://github.com/apache/kafka/pull/18455#discussion_r1909115021


##########
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerde.java:
##########
@@ -16,138 +16,31 @@
  */
 package org.apache.kafka.coordinator.group;
 
+import org.apache.kafka.common.errors.UnsupportedVersionException;
 import org.apache.kafka.common.protocol.ApiMessage;
-import org.apache.kafka.common.protocol.MessageUtil;
 import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader;
-import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord;
 import org.apache.kafka.coordinator.common.runtime.CoordinatorRecordSerde;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentValue;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupRegularExpressionKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupRegularExpressionValue;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue;
-import org.apache.kafka.coordinator.group.generated.GroupMetadataKey;
-import org.apache.kafka.coordinator.group.generated.GroupMetadataValue;
-import org.apache.kafka.coordinator.group.generated.OffsetCommitKey;
-import org.apache.kafka.coordinator.group.generated.OffsetCommitValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupStatePartitionMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupStatePartitionMetadataValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberValue;
-import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataKey;
-import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataValue;
+import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType;
 
 /**
  * Please ensure any new record added here stays in sync with DumpLogSegments.
  */
 public class GroupCoordinatorRecordSerde extends CoordinatorRecordSerde {
-    // This method is temporary until the share coordinator is converted to
-    // using the new coordinator records.
-    @Override
-    public byte[] serializeKey(CoordinatorRecord record) {
-        // Record does not accept a null key.
-        return MessageUtil.toCoordinatorTypePrefixedBytes(
-            record.key().version(),
-            record.key().message()
-        );
-    }
-
     @Override
     protected ApiMessage apiMessageKeyFor(short recordVersion) {

Review Comment:
   Personally, I would take this opportunity to stop using "recordVersion" as the variable name here. Really, it has become a record type, which just happens to be implemented as a version number.

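For illustration, a minimal sketch of the suggested rename applied to the method in this hunk. The body is an assumption inferred from the new imports in the diff (`CoordinatorRecordType`, `UnsupportedVersionException`, `CoordinatorLoader`); the actual implementation in the PR may differ:

```java
// Sketch only: rename the parameter to reflect that the prefix identifies a
// record type rather than a schema version. Body inferred from the imports
// added in this diff; the PR's real code may look different.
@Override
protected ApiMessage apiMessageKeyFor(short recordType) {
    try {
        // Look the record type up by id and create the matching key message.
        return CoordinatorRecordType.fromId(recordType).newRecordKey();
    } catch (UnsupportedVersionException ex) {
        throw new CoordinatorLoader.UnknownRecordTypeException(recordType);
    }
}
```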


##########
coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java:
##########
@@ -43,7 +43,7 @@ public abstract class CoordinatorRecordSerde implements Serializer<CoordinatorRe
     @Override
     public byte[] serializeKey(CoordinatorRecord record) {
         // Record does not accept a null key.
-        return MessageUtil.toVersionPrefixedBytes(
+        return MessageUtil.toCoordinatorTypePrefixedBytes(

Review Comment:
   So, it seems to me that `toCoordinatorTypePrefixedBytes` fixes the version of the key schema at 0, but `toVersionPrefixedBytes` permits other versions of the record schemas. However, if I understand correctly, we do not want to use record schemas above version 0, and we prefer tagged fields for any new fields added to records. What is the correct way to version records in the future?

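To make the question concrete, here is a rough, hypothetical sketch of the key layout under discussion, written against plain `ByteBuffer` rather than `MessageUtil` (whose new method signature is not visible in this diff). The class and method names below are illustrative only:

```java
import java.nio.ByteBuffer;

// Hypothetical illustration, not Kafka code: both version-prefixed and
// coordinator-type-prefixed keys share this shape on the wire, namely a
// 2-byte big-endian prefix followed by the serialized key message.
public final class KeyLayoutSketch {

    public static byte[] prefixedBytes(short prefix, byte[] serializedKey) {
        ByteBuffer buffer = ByteBuffer.allocate(2 + serializedKey.length);
        buffer.putShort(prefix);   // historically the key schema "version",
                                   // now read as the record type id
        buffer.put(serializedKey); // key message, kept at schema version 0
        return buffer.array();
    }
}
```

Under that reading, new fields would be added to the version-0 schemas as tagged fields, so the prefix never needs to advance for schema evolution; whether that is the intended long-term versioning policy is exactly the question above.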

