sijie commented on a change in pull request #9448:
URL: https://github.com/apache/pulsar/pull/9448#discussion_r589181651



##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/BytesWithSchema.java
##########
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import lombok.Value;
+
+/**
+ * This is a wrapper around a byte array (the Avro-encoded record) and a schema id in the Kafka Schema Registry.
+ */
+@Value
+public class BytesWithSchema {

Review comment:
       BytesWithKafkaSchema

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java
##########
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.subject.TopicNameStrategy;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
+import org.apache.pulsar.common.schema.SchemaInfo;
+import org.apache.pulsar.common.schema.SchemaType;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+
+@Slf4j
+class AvroSchemaCache {
+    private ConcurrentHashMap<Integer, Schema<byte[]>> cache = new ConcurrentHashMap<>();

Review comment:
       final

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java
##########
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.subject.TopicNameStrategy;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
+import org.apache.pulsar.common.schema.SchemaInfo;
+import org.apache.pulsar.common.schema.SchemaType;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+
+@Slf4j
+class AvroSchemaCache {
+    private ConcurrentHashMap<Integer, Schema<byte[]>> cache = new ConcurrentHashMap<>();
+    private final SchemaRegistryClient schemaRegistryClient;
+
+    public AvroSchemaCache(SchemaRegistryClient schemaRegistryClient) {
+        this.schemaRegistryClient = schemaRegistryClient;
+    }
+
+    public Schema<byte[]> get(int schemaId) {
+        if (cache.size() > 100) {
+            // very simple auto cleanup
+            // schemas do not change very often; we just do not want this map to grow
+            // without limits
+            cache.clear();

Review comment:
       Why not use a Guava cache?
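
   For reference, a minimal sketch of what a Guava-backed cache could look like here (a hedged illustration, not the PR's code; the size bound and the `fetchSchema` helper are made up):

```java
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.pulsar.client.api.Schema;

class GuavaAvroSchemaCache {
    // Bounded cache: Guava evicts individual entries once the limit is
    // reached, instead of wiping the whole map as cache.clear() does.
    private final LoadingCache<Integer, Schema<byte[]>> cache = CacheBuilder.newBuilder()
            .maximumSize(100) // illustrative bound
            .build(new CacheLoader<Integer, Schema<byte[]>>() {
                @Override
                public Schema<byte[]> load(Integer schemaId) {
                    return fetchSchema(schemaId);
                }
            });

    public Schema<byte[]> get(int schemaId) {
        // getUnchecked wraps loader failures in UncheckedExecutionException
        return cache.getUnchecked(schemaId);
    }

    private Schema<byte[]> fetchSchema(int schemaId) {
        // hypothetical helper: the registry lookup from the original
        // computeIfAbsent body would go here
        throw new UnsupportedOperationException("sketch only");
    }
}
```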

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java
##########
@@ -151,18 +154,32 @@ public void start() {
         runnerThread.start();
     }
 
-    public abstract V extractValue(ConsumerRecord<String, byte[]> record);
+    public Object extractValue(ConsumerRecord<Object, Object> consumerRecord) {
+        return consumerRecord.value();
+    }
 
+    public Optional<String> extractKey(ConsumerRecord<Object, Object> consumerRecord) {
+        // we are currently supporting only String keys
+        return Optional.ofNullable((String) consumerRecord.key());

Review comment:
       If you are introducing a `bytes` connector, let's not assume it is a `String`.
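
   For illustration, a hedged sketch of an `extractKey` that tolerates both `String` and `byte[]` keys (the UTF-8 conversion is an assumption, not part of this PR):

```java
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Sketch: handle both String and byte[] keys instead of casting blindly.
public Optional<String> extractKey(ConsumerRecord<Object, Object> consumerRecord) {
    Object key = consumerRecord.key();
    if (key == null) {
        return Optional.empty();
    }
    if (key instanceof String) {
        return Optional.of((String) key);
    }
    if (key instanceof byte[]) {
        // Assumption: raw bytes are UTF-8 text; a real connector might carry
        // the bytes through unchanged rather than pick an encoding.
        return Optional.of(new String((byte[]) key, StandardCharsets.UTF_8));
    }
    throw new IllegalArgumentException("Unsupported key type: " + key.getClass());
}
```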

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaBytesSource.java
##########
@@ -41,16 +49,80 @@
 @Slf4j
 public class KafkaBytesSource extends KafkaAbstractSource<byte[]> {
 
+    private AvroSchemaCache schemaCache;
+
+    private static final Collection<String> SUPPORTED_KEY_DESERIALIZERS =
+            Collections.unmodifiableCollection(Arrays.asList(StringDeserializer.class.getName()));
+
+    private static final Collection<String> SUPPORTED_VALUE_DESERIALIZERS =
+            Collections.unmodifiableCollection(Arrays.asList(ByteArrayDeserializer.class.getName(), KafkaAvroDeserializer.class.getName()));
+
     @Override
     protected Properties beforeCreateConsumer(Properties props) {
-        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
-        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
+        props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
         log.info("Created kafka consumer config : {}", props);
+
+        String currentKeyDeserializer = props.getProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG);
+        if (!SUPPORTED_KEY_DESERIALIZERS.contains(currentKeyDeserializer)) {
+            throw new IllegalArgumentException("Unsupported key deserializer: " + currentKeyDeserializer + ", only " + SUPPORTED_KEY_DESERIALIZERS);
+        }
+
+        String currentValueDeserializer = props.getProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG);
+        if (!SUPPORTED_VALUE_DESERIALIZERS.contains(currentValueDeserializer)) {
+            throw new IllegalArgumentException("Unsupported value deserializer: " + currentValueDeserializer + ", only " + SUPPORTED_VALUE_DESERIALIZERS);
+        }
+
+        // replace KafkaAvroDeserializer with our custom implementation
+        if (currentValueDeserializer != null && currentValueDeserializer.equals(KafkaAvroDeserializer.class.getName())) {
+            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, SchemaExtractorDeserializer.class.getName());
+            KafkaAvroDeserializerConfig config = new KafkaAvroDeserializerConfig(props);
+            List<String> urls = config.getSchemaRegistryUrls();
+            int maxSchemaObject = config.getMaxSchemasPerSubject();
+            SchemaRegistryClient schemaRegistryClient = new CachedSchemaRegistryClient(urls, maxSchemaObject);
+            schemaCache = new AvroSchemaCache(schemaRegistryClient);
+        }
         return props;
     }
 
     @Override
-    public byte[] extractValue(ConsumerRecord<String, byte[]> record) {
-        return record.value();
+    public Object extractValue(ConsumerRecord<Object, Object> consumerRecord) {
+        Object value = consumerRecord.value();
+        if (value instanceof BytesWithSchema) {
+            return ((BytesWithSchema) value).getValue();
+        }
+        return value;
     }
+
+    @Override
+    public org.apache.pulsar.client.api.Schema<byte[]> extractSchema(ConsumerRecord<Object, Object> consumerRecord) {
+        Object value = consumerRecord.value();
+        if (value instanceof BytesWithSchema) {
+            return schemaCache.get(((BytesWithSchema) value).getSchemaId());
+        } else {
+            return org.apache.pulsar.client.api.Schema.BYTES;
+        }
+    }
+
+    public static class SchemaExtractorDeserializer implements Deserializer<BytesWithSchema> {

Review comment:
       Can you rename it to `ExtractKafkaAvroSchemaDeserializer`? This is specific to the Kafka Avro schema.

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java
##########
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.io.kafka;
+
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.subject.TopicNameStrategy;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
+import org.apache.pulsar.common.schema.SchemaInfo;
+import org.apache.pulsar.common.schema.SchemaType;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+
+@Slf4j
+class AvroSchemaCache {
+    private ConcurrentHashMap<Integer, Schema<byte[]>> cache = new ConcurrentHashMap<>();
+    private final SchemaRegistryClient schemaRegistryClient;
+
+    public AvroSchemaCache(SchemaRegistryClient schemaRegistryClient) {
+        this.schemaRegistryClient = schemaRegistryClient;
+    }
+
+    public Schema<byte[]> get(int schemaId) {
+        if (cache.size() > 100) {
+            // very simple auto cleanup
+            // schemas do not change very often; we just do not want this map to grow
+            // without limits
+            cache.clear();
+        }
+        return cache.computeIfAbsent(schemaId, id -> {
+            try {
+                org.apache.avro.Schema schema = schemaRegistryClient.getById(schemaId);
+                String definition = schema.toString(false);
+                log.info("Schema {} definition {}", schemaId, definition);
+                return Schema.AUTO_PRODUCE_BYTES(GenericAvroSchema.of(SchemaInfo.builder()

Review comment:
       I don't understand why you need to wrap this using `AUTO_PRODUCE_BYTES`. `AUTO_PRODUCE_BYTES` is an expensive implementation because it has to deserialize the payload to verify the schema.
   
   You can implement a special schema to deal with BytesWithSchema: https://github.com/streamnative/pulsar-io-kafka/blob/master/src/main/java/io/streamnative/connectors/kafka/schema/KafkaAvroSchema.java
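
   As a rough illustration (not the linked KafkaAvroSchema, and assuming `encode`, `decode`, `getSchemaInfo`, and `clone` are the only methods Pulsar's `Schema` interface requires overriding here), a pass-through schema could report the registry's `SchemaInfo` without re-deserializing the payload:

```java
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.common.schema.SchemaInfo;

// Sketch: a pass-through schema that carries the Avro SchemaInfo but never
// re-parses the payload, unlike AUTO_PRODUCE_BYTES.
class PassThroughAvroSchema implements Schema<byte[]> {
    private final SchemaInfo schemaInfo;

    PassThroughAvroSchema(SchemaInfo schemaInfo) {
        this.schemaInfo = schemaInfo;
    }

    @Override
    public byte[] encode(byte[] message) {
        // The bytes are already Avro-encoded by Kafka; forward them untouched.
        return message;
    }

    @Override
    public byte[] decode(byte[] bytes) {
        return bytes;
    }

    @Override
    public SchemaInfo getSchemaInfo() {
        return schemaInfo;
    }

    @Override
    public Schema<byte[]> clone() {
        // Immutable, so sharing the instance is safe.
        return this;
    }
}
```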

##########
File path: pulsar-io/kafka/src/main/resources/findbugsExclude.xml
##########
@@ -19,4 +19,8 @@
 
 -->
 <FindBugsFilter>
-</FindBugsFilter>
\ No newline at end of file
+    <Match>
+        <Class name="org.apache.pulsar.io.kafka.BytesWithSchema" />
+        <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />

Review comment:
       Use `ByteBuffer`, then you can avoid this.
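
   A hedged sketch of that change (the static factory is illustrative; `asReadOnlyBuffer()` keeps callers from mutating the wrapper's state):

```java
import java.nio.ByteBuffer;
import lombok.Value;

// Sketch: storing a ByteBuffer and handing out read-only views avoids the
// EI_EXPOSE_REP warnings without a findbugs exclusion.
@Value
public class BytesWithSchema {
    ByteBuffer value;
    int schemaId;

    public static BytesWithSchema of(byte[] bytes, int schemaId) {
        return new BytesWithSchema(ByteBuffer.wrap(bytes).asReadOnlyBuffer(), schemaId);
    }

    public ByteBuffer getValue() {
        // Each caller gets its own read-only view with independent position/limit.
        return value.asReadOnlyBuffer();
    }
}
```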

##########
File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java
##########
@@ -116,19 +117,21 @@ public void close() throws InterruptedException {
         LOG.info("Kafka source stopped.");
     }
 
+    @SuppressWarnings("unchecked")
     public void start() {
         runnerThread = new Thread(() -> {
-            LOG.info("Starting kafka source");
+            LOG.info("Starting kafka source on {}", 
kafkaSourceConfig.getTopic());
             
consumer.subscribe(Collections.singletonList(kafkaSourceConfig.getTopic()));
             LOG.info("Kafka source started.");
-            ConsumerRecords<String, byte[]> consumerRecords;
             while (running) {
-                consumerRecords = consumer.poll(1000);
+                ConsumerRecords<Object, Object> consumerRecords = consumer.poll(1000);
                 CompletableFuture<?>[] futures = new CompletableFuture<?>[consumerRecords.count()];
                 int index = 0;
-                for (ConsumerRecord<String, byte[]> consumerRecord : consumerRecords) {
-                    LOG.debug("Record received from kafka, key: {}. value: {}", consumerRecord.key(), consumerRecord.value());
-                    KafkaRecord<V> record = new KafkaRecord<>(consumerRecord, extractValue(consumerRecord));
+                for (ConsumerRecord<Object, Object> consumerRecord : consumerRecords) {
+                    KafkaRecord record = new KafkaRecord(consumerRecord,
+                            extractKey(consumerRecord),

Review comment:
       Any reason why you extract the key right now? Why can't you extract the key lazily, when it is accessed via `KafkaRecord.getKey`?
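
   For example (names are illustrative, not the actual `KafkaRecord` in this PR), the record could memoize a supplier and defer the extraction until first access:

```java
import java.util.Optional;
import java.util.function.Supplier;

// Sketch: the record stores a supplier and only extracts the key on first
// access, e.g. new LazyKeyRecord(() -> extractKey(consumerRecord)).
class LazyKeyRecord {
    private final Supplier<Optional<String>> keyExtractor;
    private Optional<String> key; // null until first access

    LazyKeyRecord(Supplier<Optional<String>> keyExtractor) {
        this.keyExtractor = keyExtractor;
    }

    public synchronized Optional<String> getKey() {
        if (key == null) {
            key = keyExtractor.get(); // runs at most once
        }
        return key;
    }
}
```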




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

