eolivelli commented on a change in pull request #9825:
URL: https://github.com/apache/pulsar/pull/9825#discussion_r592946949
########## File path: pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducer.java ##########

+    private final SinkConnector connector;
+    private final SinkTask task;
+    private final Schema defaultKeySchema;
+    private final Schema defaultValueSchema;
+    private final PulsarKafkaSinkContext sinkContext;
+    private final PulsarKafkaSinkTaskContext taskContext;
+    private final int batchSize;
+    private final ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor();

Review comment:
   can we give it a name?
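   For example, a minimal sketch assuming Guava's `ThreadFactoryBuilder` is available (the class already uses other Guava utilities); the name format is only an illustration:

```java
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class NamedSchedulerSketch {
    // A named daemon thread is easy to spot in thread dumps and does not
    // keep the JVM alive by itself.
    private final ScheduledExecutorService scheduledExecutor =
            Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
                    .setNameFormat("kafka-adaptor-sink-flush-%d")
                    .setDaemon(true)
                    .build());
}
```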
########## File path: pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducer.java ##########

+        long lingerMs = getLingerMs(props);
+        scheduledExecutor.scheduleAtFixedRate(() -> this.flushIfNeeded(true), lingerMs, lingerMs, TimeUnit.MILLISECONDS);

Review comment:
   we are not shutting down this `scheduledExecutor`; can you please shut it down in the `close` method?
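   A possible shape for `close()` (a sketch against the wrapper's fields; the ordering with `task.stop()` and `connector.stop()` is up to you):

```java
@Override
public void close() {
    flush();
    scheduledExecutor.shutdown();
    try {
        // Give a running flush a chance to finish before forcing shutdown.
        if (!scheduledExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
            scheduledExecutor.shutdownNow();
        }
    } catch (InterruptedException ie) {
        scheduledExecutor.shutdownNow();
        Thread.currentThread().interrupt();
    }
    task.stop();
    connector.stop();
    taskContext.close();
}
```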
########## File path: pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducer.java ##########

+    @Override
+    public Map<MetricName, ? extends Metric> metrics() {
+        sinkContext.throwIfNeeded();
+        return null;

Review comment:
   what about returning an empty map or throwing UnsupportedOperationException?
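   An empty map spares callers that iterate over the metrics from null checks; a sketch using `java.util.Collections`:

```java
@Override
public Map<MetricName, ? extends Metric> metrics() {
    sinkContext.throwIfNeeded();
    // There is no Kafka client underneath, so there are no metrics to report.
    return Collections.emptyMap();
}
```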
########## File path: pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducerTest.java ##########

+        producer.send(record, (metadata, exception) -> {
+            if (exception == null) {
+                status.incrementAndGet();
+            } else {
+                System.out.println(exception.toString());
+                exception.printStackTrace();

Review comment:
   nit: use a logger?
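   For instance, assuming the test class gets Lombok's `@Slf4j` annotation (the main classes in this PR already use it):

```java
producer.send(record, (metadata, exception) -> {
    if (exception == null) {
        status.incrementAndGet();
    } else {
        // Keeps the stack trace in the test logs instead of stdout.
        log.error("send failed", exception);
    }
});
```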
########## File path: pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/ProducerRecordWithSchema.java ##########

+@EqualsAndHashCode(callSuper = true)
+public class ProducerRecordWithSchema<K, V> extends ProducerRecord<K, V> {
+
+    final Schema keySchema;
+    final Schema valueSchema;
+
+    public ProducerRecordWithSchema(String topic, Integer partition, Long timestamp, K key, V value,

Review comment:
   do we need to implement all of these constructors? probably we need only one

########## File path: pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducerTest.java ##########

+    @AfterMethod(alwaysRun = true)
+    @Override
+    protected void cleanup() throws Exception {
+        if (file != null) {
+            //Files.delete(file);

Review comment:
   please un-comment this line
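   If the deletion comes back, `Files.deleteIfExists` is a safe variant in case a test never created the file (just a suggestion):

```java
@AfterMethod(alwaysRun = true)
@Override
protected void cleanup() throws Exception {
    if (file != null) {
        // Does not throw if the test never wrote the file.
        Files.deleteIfExists(file);
    }
    super.internalCleanup();
}
```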
########## File path: pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java ##########

@@ -49,7 +53,11 @@
     @Override
     public void write(Record<byte[]> sourceRecord) {
         KeyValue<K, V> keyValue = extractKeyValue(sourceRecord);
-        ProducerRecord<K, V> record = new ProducerRecord<>(kafkaSinkConfig.getTopic(), keyValue.getKey(), keyValue.getValue());
+        KeyValue<Schema, Schema> keyValueSchemas = extractKeyValueSchemas(sourceRecord);

Review comment:
   Using a KeyValue<Schema, Schema> here may be misleading; what about using a "Pair"? KeyValue is usually the class we use as the content of the Record, but here you are using it as a simple Pair, to return two objects from the extractKeyValueSchemas method.
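   A sketch of the call site with `org.apache.commons.lang3.tuple.Pair` (assuming commons-lang3 is on the classpath; any small tuple type built with `Pair.of(keySchema, valueSchema)` would do):

```java
// If extractKeyValueSchemas returned a Pair instead of a KeyValue:
Pair<Schema, Schema> schemas = extractKeyValueSchemas(sourceRecord);
Schema keySchema = schemas.getLeft();
Schema valueSchema = schemas.getRight();
```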
########## File path: pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarKafkaSinkTaskContext.java ##########

+    private ByteBuffer topicPartitionAsKey(TopicPartition topicPartition) {

Review comment:
   could this method be static?
########## File path: pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducerTest.java ##########

+        file = Paths.get(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());

Review comment:
   what about Files.createTempFile?
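   With NIO the setup can create the file directly and get a unique name for free (prefix and suffix here are arbitrary):

```java
// java.nio.file.Files picks the temp directory and a unique name itself.
file = Files.createTempFile("kafka-connect-sink-test", ".txt");
```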
########## File path: pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaSinkWrappingProducer.java ##########

+    @Override
+    public List<PartitionInfo> partitionsFor(String topic) {
+        sinkContext.throwIfNeeded();
+        return taskContext.assignment().stream()
+                .filter(x -> x.topic().equals(topic))
+                .map(x -> new PartitionInfo(x.topic(), x.partition(), node, replicas, replicas))
+                .collect(Collectors.toList());
+    }

Review comment:
   do we need to implement this method? if we are not calling it from our KafkaSink then we could simply throw UnsupportedOperationException
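   For example:

```java
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    throw new UnsupportedOperationException("partitionsFor is not supported by this wrapper");
}
```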
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]
