eolivelli commented on a change in pull request #9448:
URL: https://github.com/apache/pulsar/pull/9448#discussion_r589221356



##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java
##########
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.subject.TopicNameStrategy;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
+import org.apache.pulsar.common.schema.SchemaInfo;
+import org.apache.pulsar.common.schema.SchemaType;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+
+@Slf4j
+class AvroSchemaCache {
+    private ConcurrentHashMap<Integer, Schema<byte[]>> cache = new ConcurrentHashMap<>();
+    private final SchemaRegistryClient schemaRegistryClient;
+
+    public AvroSchemaCache(SchemaRegistryClient schemaRegistryClient) {
+        this.schemaRegistryClient = schemaRegistryClient;
+    }
+
+    public Schema<byte[]> get(int schemaId) {
+        if (cache.size() > 100) {
+            // very simple auto cleanup
+            // schemas do not change very often; we just do not want this
+            // map to grow without limits
+            cache.clear();

Review comment:
       done
   
The reason was that I didn't want to add an additional third-party dependency (the Guava jar would be bundled into the .nar file).
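
For context, the alternative being weighed here would be a bounded cache that evicts entries one at a time instead of clearing the whole map. A minimal sketch of that variant, assuming Guava were acceptable as a bundled dependency (hypothetical, not the PR's code):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import org.apache.pulsar.client.api.Schema;

    // Hypothetical Guava-based variant of the size bound: once the cache
    // holds 100 schemas, Guava evicts individual entries instead of
    // wiping everything as cache.clear() does above.
    class BoundedAvroSchemaCache {
        private final Cache<Integer, Schema<byte[]>> cache = CacheBuilder.newBuilder()
                .maximumSize(100)
                .build();

        Schema<byte[]> getIfPresent(int schemaId) {
            return cache.getIfPresent(schemaId);
        }
    }

The trade-off stated above stands: CacheBuilder gives smoother eviction, but only at the cost of shipping the Guava jar inside the .nar archive.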

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java
##########
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.subject.TopicNameStrategy;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
+import org.apache.pulsar.common.schema.SchemaInfo;
+import org.apache.pulsar.common.schema.SchemaType;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+
+@Slf4j
+class AvroSchemaCache {
+    private ConcurrentHashMap<Integer, Schema<byte[]>> cache = new ConcurrentHashMap<>();
+    private final SchemaRegistryClient schemaRegistryClient;
+
+    public AvroSchemaCache(SchemaRegistryClient schemaRegistryClient) {
+        this.schemaRegistryClient = schemaRegistryClient;
+    }
+
+    public Schema<byte[]> get(int schemaId) {
+        if (cache.size() > 100) {
+            // very simple auto cleanup
+            // schemas do not change very often; we just do not want this
+            // map to grow without limits
+            cache.clear();
+        }
+        return cache.computeIfAbsent(schemaId, id -> {
+            try {
+                org.apache.avro.Schema schema = schemaRegistryClient.getById(schemaId);
+                String definition = schema.toString(false);
+                log.info("Schema {} definition {}", schemaId, definition);
+                return Schema.AUTO_PRODUCE_BYTES(GenericAvroSchema.of(SchemaInfo.builder()

Review comment:
       done
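
For readers following the hunk above: the builder call is cut off by the diff, but based on the imports in this file (SchemaType, StandardCharsets, Collections), the continuation plausibly looks like the sketch below. This is an assumption, not the PR's exact code:

    // Hypothetical continuation of the truncated return statement: build a
    // Pulsar SchemaInfo from the Avro definition fetched from the registry,
    // wrap it in a GenericAvroSchema, and let AUTO_PRODUCE_BYTES validate
    // the raw Avro payloads against it.
    SchemaInfo schemaInfo = SchemaInfo.builder()
            .type(SchemaType.AVRO)
            .name(schema.getName())
            .properties(Collections.emptyMap())
            .schema(definition.getBytes(StandardCharsets.UTF_8))
            .build();
    return Schema.AUTO_PRODUCE_BYTES(GenericAvroSchema.of(schemaInfo));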

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaBytesSource.java
##########
@@ -41,16 +49,80 @@
 @Slf4j
 public class KafkaBytesSource extends KafkaAbstractSource<byte[]> {
 
+    private AvroSchemaCache schemaCache;
+
+    private static final Collection<String> SUPPORTED_KEY_DESERIALIZERS =
+            Collections.unmodifiableCollection(Arrays.asList(StringDeserializer.class.getName()));
+
+    private static final Collection<String> SUPPORTED_VALUE_DESERIALIZERS =
+            Collections.unmodifiableCollection(Arrays.asList(ByteArrayDeserializer.class.getName(), KafkaAvroDeserializer.class.getName()));
+
     @Override
     protected Properties beforeCreateConsumer(Properties props) {
-        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
-        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
+        props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
         log.info("Created kafka consumer config : {}", props);
+
+        String currentKeyDeserializer = props.getProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG);
+        if (!SUPPORTED_KEY_DESERIALIZERS.contains(currentKeyDeserializer)) {
+            throw new IllegalArgumentException("Unsupported key deserializer: " + currentKeyDeserializer + ", only " + SUPPORTED_KEY_DESERIALIZERS);
+        }
+
+        String currentValueDeserializer = props.getProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG);
+        if (!SUPPORTED_VALUE_DESERIALIZERS.contains(currentValueDeserializer)) {
+            throw new IllegalArgumentException("Unsupported value deserializer: " + currentValueDeserializer + ", only " + SUPPORTED_VALUE_DESERIALIZERS);
+        }
+
+        // replace KafkaAvroDeserializer with our custom implementation
+        if (currentValueDeserializer != null && currentValueDeserializer.equals(KafkaAvroDeserializer.class.getName())) {
+            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, SchemaExtractorDeserializer.class.getName());
+            KafkaAvroDeserializerConfig config = new KafkaAvroDeserializerConfig(props);
+            List<String> urls = config.getSchemaRegistryUrls();
+            int maxSchemaObject = config.getMaxSchemasPerSubject();
+            SchemaRegistryClient schemaRegistryClient = new CachedSchemaRegistryClient(urls, maxSchemaObject);
+            schemaCache = new AvroSchemaCache(schemaRegistryClient);
+        }
         return props;
     }
 
     @Override
-    public byte[] extractValue(ConsumerRecord<String, byte[]> record) {
-        return record.value();
+    public Object extractValue(ConsumerRecord<Object, Object> consumerRecord) {
+        Object value = consumerRecord.value();
+        if (value instanceof BytesWithSchema) {
+            return ((BytesWithSchema) value).getValue();
+        }
+        return value;
     }
+
+    @Override
+    public org.apache.pulsar.client.api.Schema<byte[]> extractSchema(ConsumerRecord<Object, Object> consumerRecord) {
+        Object value = consumerRecord.value();
+        if (value instanceof BytesWithSchema) {
+            return schemaCache.get(((BytesWithSchema) value).getSchemaId());
+        } else {
+            return org.apache.pulsar.client.api.Schema.BYTES;
+        }
+    }
+
+    public static class SchemaExtractorDeserializer implements Deserializer<BytesWithSchema> {

Review comment:
       done
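
The class body is cut off by the diff. A deserializer like this has to undo the Confluent wire format, which prefixes every Avro payload with a magic byte (0) and a 4-byte big-endian schema id. A minimal sketch of what the body could look like (everything beyond the signature shown above is an assumption):

    // assumes: import java.nio.ByteBuffer;
    //          import org.apache.kafka.common.serialization.Deserializer;
    public static class SchemaExtractorDeserializer implements Deserializer<BytesWithSchema> {
        @Override
        public BytesWithSchema deserialize(String topic, byte[] data) {
            if (data == null) {
                return null;
            }
            ByteBuffer buffer = ByteBuffer.wrap(data);
            byte magic = buffer.get(); // Confluent wire format: first byte must be 0
            if (magic != 0) {
                throw new IllegalArgumentException("Unknown magic byte " + magic);
            }
            int schemaId = buffer.getInt(); // next 4 bytes: registry schema id
            byte[] avroPayload = new byte[buffer.remaining()]; // rest: Avro-encoded record
            buffer.get(avroPayload);
            return new BytesWithSchema(avroPayload, schemaId);
        }
    }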

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/BytesWithSchema.java
##########
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import lombok.Value;
+
+/**
+ * This is a wrapper around a byte array (the Avro-encoded record) and a schema id in the Kafka Schema Registry.
+ */
+@Value
+public class BytesWithSchema {

Review comment:
       done
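
The body is cut off here too. Given the getValue()/getSchemaId() calls in KafkaBytesSource above, the Lombok @Value class presumably reduces to two fields; a sketch with field names inferred from those accessors, not confirmed by the diff:

    @Value
    public class BytesWithSchema {
        // Avro-encoded record, with the 5-byte Confluent header already stripped
        byte[] value;
        // id of the writer schema in the Kafka Schema Registry
        int schemaId;
    }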




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

