aljoscha commented on a change in pull request #8535: [FLINK-11693] Add KafkaSerializationSchema that uses ProducerRecord
URL: https://github.com/apache/flink/pull/8535#discussion_r298036911
 
 

 ##########
 File path: flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer.java
 ##########
 @@ -625,34 +765,53 @@ public void onCompletion(RecordMetadata metadata, Exception exception) {
        public void invoke(FlinkKafkaProducer.KafkaTransactionState transaction, IN next, Context context) throws FlinkKafkaException {
                checkErroneous();
 
-               byte[] serializedKey = schema.serializeKey(next);
-               byte[] serializedValue = schema.serializeValue(next);
-               String targetTopic = schema.getTargetTopic(next);
-               if (targetTopic == null) {
-                       targetTopic = defaultTopicId;
-               }
+               ProducerRecord<byte[], byte[]> record;
+               if (keyedSchema != null) {
+                       byte[] serializedKey = keyedSchema.serializeKey(next);
+                       byte[] serializedValue = keyedSchema.serializeValue(next);
+                       String targetTopic = keyedSchema.getTargetTopic(next);
+                       if (targetTopic == null) {
+                               targetTopic = defaultTopicId;
+                       }
 
-               Long timestamp = null;
-               if (this.writeTimestampToKafka) {
-                       timestamp = context.timestamp();
-               }
+                       Long timestamp = null;
+                       if (this.writeTimestampToKafka) {
+                               timestamp = context.timestamp();
+                       }
 
-               ProducerRecord<byte[], byte[]> record;
-               int[] partitions = topicPartitionsMap.get(targetTopic);
-               if (null == partitions) {
-                       partitions = getPartitionsByTopic(targetTopic, transaction.producer);
-                       topicPartitionsMap.put(targetTopic, partitions);
-               }
-               if (flinkKafkaPartitioner != null) {
-                       record = new ProducerRecord<>(
-                               targetTopic,
-                               flinkKafkaPartitioner.partition(next, serializedKey, serializedValue, targetTopic, partitions),
-                               timestamp,
-                               serializedKey,
-                               serializedValue);
+                       int[] partitions = topicPartitionsMap.get(targetTopic);
+                       if (null == partitions) {
+                               partitions = getPartitionsByTopic(targetTopic, transaction.producer);
+                               topicPartitionsMap.put(targetTopic, partitions);
+                       }
+                       if (flinkKafkaPartitioner != null) {
+                               record = new ProducerRecord<>(
+                                               targetTopic,
+                                               flinkKafkaPartitioner.partition(next, serializedKey, serializedValue, targetTopic, partitions),
+                                               timestamp,
+                                               serializedKey,
+                                               serializedValue);
+                       } else {
+                               record = new ProducerRecord<>(targetTopic, null, timestamp, serializedKey, serializedValue);
+                       }
                } else {
-                       record = new ProducerRecord<>(targetTopic, null, timestamp, serializedKey, serializedValue);
+                       if (kafkaSchema instanceof ContextAwareSerializationSchema) {
+                               ContextAwareSerializationSchema<IN> contextAwareSchema =
+                                               (ContextAwareSerializationSchema<IN>) kafkaSchema;
+
+                               String targetTopic = contextAwareSchema.getTargetTopic(next);
 
 Review comment:
   Fixing 👍 
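
For reference, the direction of this change is to let users hand the producer a complete ProducerRecord instead of separate key, value, and topic pieces. Below is a minimal, hypothetical sketch of what such a schema implementation could look like; the KafkaSerializationSchema interface name, its package, and the serialize(element, timestamp) signature are assumed from the PR title and discussion rather than taken from the merged API, and the topic handling is purely illustrative.

import java.nio.charset.StandardCharsets;

import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema; // interface introduced by this PR (assumed package)
import org.apache.kafka.clients.producer.ProducerRecord;

// Sketch only: serializes each String element into a ProducerRecord aimed at a fixed,
// illustrative topic, so topic, partition, key, and timestamp are all chosen per element.
public class SimpleStringRecordSchema implements KafkaSerializationSchema<String> {

    private final String topic;

    public SimpleStringRecordSchema(String topic) {
        this.topic = topic;
    }

    @Override
    public ProducerRecord<byte[], byte[]> serialize(String element, Long timestamp) {
        byte[] value = element.getBytes(StandardCharsets.UTF_8);
        // null partition and null key: let the Kafka producer's own partitioner decide placement
        return new ProducerRecord<>(topic, null, timestamp, null, value);
    }
}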

