urbandan commented on a change in pull request #4730:
URL: https://github.com/apache/nifi/pull/4730#discussion_r561093484



##########
File path: 
nifi-external/nifi-kafka-connect/nifi-kafka-connector/src/main/java/org/apache/nifi/kafka/connect/StatelessNiFiSinkTask.java
##########
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.kafka.connect;
+
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.connect.errors.RetriableException;
+import org.apache.kafka.connect.header.Header;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kafka.connect.sink.SinkTask;
+import org.apache.nifi.controller.queue.QueueSize;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.stateless.flow.DataflowTrigger;
+import org.apache.nifi.stateless.flow.StatelessDataflow;
+import org.apache.nifi.stateless.flow.TriggerResult;
+import org.apache.nifi.util.FormatUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+
+public class StatelessNiFiSinkTask extends SinkTask {
+    private static final Logger logger = 
LoggerFactory.getLogger(StatelessNiFiSinkTask.class);
+
+    private StatelessDataflow dataflow;
+    private String inputPortName;
+    private Set<String> failurePortNames;
+    private long timeoutMillis;
+    private Pattern headerNameRegex;
+    private String headerNamePrefix;
+    private int batchSize;
+    private long batchBytes;
+    private QueueSize queueSize;
+    private String dataflowName;
+
+    private long backoffMillis = 0L;
+    private boolean lastTriggerSuccessful = true;
+    private ExecutorService backgroundTriggerExecutor;
+
+    @Override
+    public String version() {
+        return StatelessKafkaConnectorUtil.getVersion();
+    }
+
+    @Override
+    public void start(final Map<String, String> properties) {
+        logger.info("Starting Sink Task with properties {}", 
StatelessKafkaConnectorUtil.getLoggableProperties(properties));
+
+        final String timeout = 
properties.getOrDefault(StatelessKafkaConnectorUtil.DATAFLOW_TIMEOUT, 
StatelessKafkaConnectorUtil.DEFAULT_DATAFLOW_TIMEOUT);
+        timeoutMillis = (long) FormatUtils.getPreciseTimeDuration(timeout, 
TimeUnit.MILLISECONDS);
+
+        dataflowName = 
properties.get(StatelessKafkaConnectorUtil.DATAFLOW_NAME);
+
+        final String regex = 
properties.get(StatelessNiFiSinkConnector.HEADERS_AS_ATTRIBUTES_REGEX);
+        headerNameRegex = regex == null ? null : Pattern.compile(regex);
+        headerNamePrefix = 
properties.getOrDefault(StatelessNiFiSinkConnector.HEADER_ATTRIBUTE_NAME_PREFIX,
 "");
+
+        batchSize = 
Integer.parseInt(properties.getOrDefault(StatelessNiFiSinkConnector.BATCH_SIZE_COUNT,
 "0"));
+        batchBytes = 
Long.parseLong(properties.getOrDefault(StatelessNiFiSinkConnector.BATCH_SIZE_BYTES,
 "0"));
+
+        dataflow = StatelessKafkaConnectorUtil.createDataflow(properties);
+
+        // Determine input port name. If input port is explicitly set, use the 
value given. Otherwise, if only one port exists, use that. Otherwise, throw 
ConfigException.
+        final String dataflowName = 
properties.get(StatelessKafkaConnectorUtil.DATAFLOW_NAME);
+        inputPortName = 
properties.get(StatelessNiFiSinkConnector.INPUT_PORT_NAME);
+        if (inputPortName == null) {
+            final Set<String> inputPorts = dataflow.getInputPortNames();
+            if (inputPorts.isEmpty()) {
+                throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> does not have an Input Port at the root level. Dataflows used 
for a Kafka Connect Sink Task "
+                    + "must have at least one Input Port at the root level.");
+            }
+
+            if (inputPorts.size() > 1) {
+                throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> has multiple Input Ports at the root level (" + 
inputPorts.toString()
+                    + "). The " + StatelessNiFiSinkConnector.INPUT_PORT_NAME + 
" property must be set to indicate which of these Ports Kafka records should be 
sent to.");
+            }
+
+            inputPortName = inputPorts.iterator().next();
+        }
+
+        // Validate the input port
+        if (!dataflow.getInputPortNames().contains(inputPortName)) {
+            throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> does not have Input Port with name <" + inputPortName + "> at 
the root level. Existing Input Port names are "
+                + dataflow.getInputPortNames());
+        }
+
+        // Determine the failure Ports, if any are given.
+        final String failurePortList = 
properties.get(StatelessNiFiSinkConnector.FAILURE_PORTS);
+        if (failurePortList == null || failurePortList.trim().isEmpty()) {
+            failurePortNames = Collections.emptySet();
+        } else {
+            failurePortNames = new HashSet<>();
+
+            final String[] names = failurePortList.split(",");
+            for (final String name : names) {
+                final String trimmed = name.trim();
+                failurePortNames.add(trimmed);
+            }
+        }
+
+        // Validate the failure ports
+        final Set<String> outputPortNames = dataflow.getOutputPortNames();
+        for (final String failurePortName : failurePortNames) {
+            if (!outputPortNames.contains(failurePortName)) {
+                throw new ConfigException("Dataflow was configured with a 
Failure Port of " + failurePortName
+                    + " but there is no Port with that name in the dataflow. 
Valid Port names are " + outputPortNames);
+            }
+        }
+
+        backgroundTriggerExecutor = Executors.newFixedThreadPool(1, r -> {
+            final Thread thread = 
Executors.defaultThreadFactory().newThread(r);
+            thread.setName("Execute dataflow " + dataflowName);
+            return thread;
+        });
+    }
+
+    @Override
+    public void put(final Collection<SinkRecord> records) {
+        if (!lastTriggerSuccessful) {
+            // When Kafka Connect calls #put, it expects the method to return 
quickly. If the dataflow should be triggered, it needs to be triggered
+            // in a background thread. When that happens, it's possible that 
the dataflow could fail, in which case we'd want to rollback and re-deliver
+            // the messages. Because the background thread cannot readily do 
that, it sets a flag, `lastTriggerSuccessful = false`. We check this here,
+            // so that if a background task failed, we can throw a 
RetriableException, resulting in the messages being requeued. Because of that, 
we ensure
+            // that any time that we set lastTriggerSuccessful, we also purge 
any data in the dataflow, so that it can be redelivered and retried.
+
+            lastTriggerSuccessful = true; // We don't want to throw a 
RetriableException again.
+            throw new RetriableException("Last attempt to trigger dataflow 
failed");
+        }
+
+        logger.debug("Enqueuing {} Kafka messages", records.size());
+
+        for (final SinkRecord record : records) {
+            final Map<String, String> attributes = createAttributes(record);
+            final byte[] contents = getContents(record.value());
+
+            queueSize = dataflow.enqueue(contents, attributes, inputPortName);
+        }
+
+        // If we haven't reached the preferred batch size, return.
+        if (queueSize == null || queueSize.getObjectCount() < batchSize) {
+            return;
+        }
+        if (queueSize.getByteCount() < batchBytes) {
+            return;
+        }
+
+        logger.debug("Triggering dataflow in background thread");
+
+        backgroundTriggerExecutor.submit(this::triggerDataflow);

Review comment:
       As we discussed offline, would be preferable to do the trigger in flush, 
and only enqueue in put.

##########
File path: 
nifi-external/nifi-kafka-connect/nifi-kafka-connector/src/main/java/org/apache/nifi/kafka/connect/StatelessNiFiSourceTask.java
##########
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.kafka.connect;
+
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.errors.RetriableException;
+import org.apache.kafka.connect.header.ConnectHeaders;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.source.SourceTask;
+import org.apache.nifi.components.state.Scope;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.stateless.flow.DataflowTrigger;
+import org.apache.nifi.stateless.flow.StatelessDataflow;
+import org.apache.nifi.stateless.flow.TriggerResult;
+import org.apache.nifi.util.FormatUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.regex.Pattern;
+
+public class StatelessNiFiSourceTask extends SourceTask {
+    public static final String STATE_MAP_KEY = "task.index";
+    private static final Logger logger = 
LoggerFactory.getLogger(StatelessNiFiSourceTask.class);
+
+    private StatelessDataflow dataflow;
+    private String outputPortName;
+    private String topicName;
+    private String topicNameAttribute;
+    private TriggerResult triggerResult;
+    private String keyAttributeName;
+    private Pattern headerAttributeNamePattern;
+    private long timeoutMillis;
+    private String dataflowName;
+    private long failureYieldExpiration = 0L;
+
+    private final Map<String, String> clusterStatePartitionMap = 
Collections.singletonMap(STATE_MAP_KEY, "CLUSTER");
+    private Map<String, String> localStatePartitionMap = new HashMap<>();
+    private boolean primaryNodeOnly;
+    private boolean primaryNodeTask;
+
+    private final AtomicLong unacknowledgedRecords = new AtomicLong(0L);
+
+    @Override
+    public String version() {
+        return StatelessKafkaConnectorUtil.getVersion();
+    }
+
+    @Override
+    public void start(final Map<String, String> properties) {
+        logger.info("Starting Source Task with properties {}", 
StatelessKafkaConnectorUtil.getLoggableProperties(properties));
+
+        final String timeout = 
properties.getOrDefault(StatelessKafkaConnectorUtil.DATAFLOW_TIMEOUT, 
StatelessKafkaConnectorUtil.DEFAULT_DATAFLOW_TIMEOUT);
+        timeoutMillis = (long) FormatUtils.getPreciseTimeDuration(timeout, 
TimeUnit.MILLISECONDS);
+
+        topicName = properties.get(StatelessNiFiSourceConnector.TOPIC_NAME);
+        topicNameAttribute = 
properties.get(StatelessNiFiSourceConnector.TOPIC_NAME_ATTRIBUTE);
+        keyAttributeName = 
properties.get(StatelessNiFiSourceConnector.KEY_ATTRIBUTE);
+
+        if (topicName == null && topicNameAttribute == null) {
+            throw new ConfigException("Either the topic.name or 
topic.name.attribute configuration must be specified");
+        }
+
+        final String headerRegex = 
properties.get(StatelessNiFiSourceConnector.HEADER_REGEX);
+        headerAttributeNamePattern = headerRegex == null ? null : 
Pattern.compile(headerRegex);
+
+        dataflow = StatelessKafkaConnectorUtil.createDataflow(properties);
+        primaryNodeOnly = dataflow.isSourcePrimaryNodeOnly();
+
+        // Determine the name of the Output Port to retrieve data from
+        dataflowName = 
properties.get(StatelessKafkaConnectorUtil.DATAFLOW_NAME);
+        outputPortName = 
properties.get(StatelessNiFiSourceConnector.OUTPUT_PORT_NAME);
+        if (outputPortName == null) {
+            final Set<String> outputPorts = dataflow.getOutputPortNames();
+            if (outputPorts.isEmpty()) {
+                throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> does not have an Output Port at the root level. Dataflows 
used for a Kafka Connect Source Task "
+                    + "must have at least one Output Port at the root level.");
+            }
+
+            if (outputPorts.size() > 1) {
+                throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> has multiple Output Ports at the root level (" + 
outputPorts.toString()
+                    + "). The " + 
StatelessNiFiSourceConnector.OUTPUT_PORT_NAME + " property must be set to 
indicate which of these Ports Kafka records should be retrieved from.");
+            }
+
+            outputPortName = outputPorts.iterator().next();
+        }
+
+        final String taskIndex = properties.get(STATE_MAP_KEY);
+        localStatePartitionMap.put(STATE_MAP_KEY, taskIndex);
+        primaryNodeTask = "0".equals(taskIndex);
+
+        if (primaryNodeOnly && !primaryNodeTask) {
+            logger.warn("Configured Dataflow ({}) requires that the source be 
run only on the Primary Node, but the Connector is configured with more than 
one task. The dataflow will only be run by" +

Review comment:
       If the task should only run in a single instance, the connector should 
only create a single task configuration.

##########
File path: 
nifi-external/nifi-kafka-connect/nifi-kafka-connector/src/main/java/org/apache/nifi/kafka/connect/StatelessNiFiSourceTask.java
##########
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.kafka.connect;
+
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.errors.RetriableException;
+import org.apache.kafka.connect.header.ConnectHeaders;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.source.SourceTask;
+import org.apache.nifi.components.state.Scope;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.stateless.flow.DataflowTrigger;
+import org.apache.nifi.stateless.flow.StatelessDataflow;
+import org.apache.nifi.stateless.flow.TriggerResult;
+import org.apache.nifi.util.FormatUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.regex.Pattern;
+
+public class StatelessNiFiSourceTask extends SourceTask {
+    public static final String STATE_MAP_KEY = "task.index";
+    private static final Logger logger = 
LoggerFactory.getLogger(StatelessNiFiSourceTask.class);
+
+    private StatelessDataflow dataflow;
+    private String outputPortName;
+    private String topicName;
+    private String topicNameAttribute;
+    private TriggerResult triggerResult;
+    private String keyAttributeName;
+    private Pattern headerAttributeNamePattern;
+    private long timeoutMillis;
+    private String dataflowName;
+    private long failureYieldExpiration = 0L;
+
+    private final Map<String, String> clusterStatePartitionMap = 
Collections.singletonMap(STATE_MAP_KEY, "CLUSTER");
+    private Map<String, String> localStatePartitionMap = new HashMap<>();
+    private boolean primaryNodeOnly;
+    private boolean primaryNodeTask;
+
+    private final AtomicLong unacknowledgedRecords = new AtomicLong(0L);
+
+    @Override
+    public String version() {
+        return StatelessKafkaConnectorUtil.getVersion();
+    }
+
+    @Override
+    public void start(final Map<String, String> properties) {
+        logger.info("Starting Source Task with properties {}", 
StatelessKafkaConnectorUtil.getLoggableProperties(properties));
+
+        final String timeout = 
properties.getOrDefault(StatelessKafkaConnectorUtil.DATAFLOW_TIMEOUT, 
StatelessKafkaConnectorUtil.DEFAULT_DATAFLOW_TIMEOUT);
+        timeoutMillis = (long) FormatUtils.getPreciseTimeDuration(timeout, 
TimeUnit.MILLISECONDS);
+
+        topicName = properties.get(StatelessNiFiSourceConnector.TOPIC_NAME);
+        topicNameAttribute = 
properties.get(StatelessNiFiSourceConnector.TOPIC_NAME_ATTRIBUTE);
+        keyAttributeName = 
properties.get(StatelessNiFiSourceConnector.KEY_ATTRIBUTE);
+
+        if (topicName == null && topicNameAttribute == null) {
+            throw new ConfigException("Either the topic.name or 
topic.name.attribute configuration must be specified");
+        }
+
+        final String headerRegex = 
properties.get(StatelessNiFiSourceConnector.HEADER_REGEX);
+        headerAttributeNamePattern = headerRegex == null ? null : 
Pattern.compile(headerRegex);
+
+        dataflow = StatelessKafkaConnectorUtil.createDataflow(properties);
+        primaryNodeOnly = dataflow.isSourcePrimaryNodeOnly();
+
+        // Determine the name of the Output Port to retrieve data from
+        dataflowName = 
properties.get(StatelessKafkaConnectorUtil.DATAFLOW_NAME);
+        outputPortName = 
properties.get(StatelessNiFiSourceConnector.OUTPUT_PORT_NAME);
+        if (outputPortName == null) {
+            final Set<String> outputPorts = dataflow.getOutputPortNames();
+            if (outputPorts.isEmpty()) {
+                throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> does not have an Output Port at the root level. Dataflows 
used for a Kafka Connect Source Task "
+                    + "must have at least one Output Port at the root level.");
+            }
+
+            if (outputPorts.size() > 1) {
+                throw new ConfigException("The dataflow specified for <" + 
dataflowName + "> has multiple Output Ports at the root level (" + 
outputPorts.toString()
+                    + "). The " + 
StatelessNiFiSourceConnector.OUTPUT_PORT_NAME + " property must be set to 
indicate which of these Ports Kafka records should be retrieved from.");
+            }
+
+            outputPortName = outputPorts.iterator().next();
+        }
+
+        final String taskIndex = properties.get(STATE_MAP_KEY);
+        localStatePartitionMap.put(STATE_MAP_KEY, taskIndex);
+        primaryNodeTask = "0".equals(taskIndex);
+
+        if (primaryNodeOnly && !primaryNodeTask) {
+            logger.warn("Configured Dataflow ({}) requires that the source be 
run only on the Primary Node, but the Connector is configured with more than 
one task. The dataflow will only be run by" +
+                " one of the tasks.", dataflowName);
+        }
+
+        final Map<String, String> localStateMap = (Map<String, String>) (Map) 
context.offsetStorageReader().offset(localStatePartitionMap);
+        final Map<String, String> clusterStateMap = (Map<String, String>) 
(Map) context.offsetStorageReader().offset(clusterStatePartitionMap);
+
+        dataflow.setComponentStates(localStateMap, Scope.LOCAL);
+        dataflow.setComponentStates(clusterStateMap, Scope.CLUSTER);
+    }
+
+    @Override
+    public List<SourceRecord> poll() throws InterruptedException {
+        final long yieldExpiration = Math.max(failureYieldExpiration, 
dataflow.getSourceYieldExpiration());
+        final long now = System.currentTimeMillis();
+        final long yieldMillis = yieldExpiration - now;
+        if (yieldMillis > 0) {
+            // If source component has yielded, we don't want to trigger it 
again until the yield expiration expires, in order to avoid
+            // overloading the source system.
+            logger.debug("Source of NiFi flow has opted to yield for {} 
milliseconds. Will pause dataflow until that time period has elapsed.", 
yieldMillis);
+            Thread.sleep(yieldMillis);
+            return null;
+        }
+
+        // If the source of the dataflow requires that the task be run on 
Primary Node Only, and this is not Task 0, then
+        // we do not want to run the task.
+        if (primaryNodeOnly && !primaryNodeTask) {
+            logger.debug("Source of dataflow {} is to be run on Primary Node 
only, and this task is not the Primary Node task. Will not trigger dataflow.", 
dataflow);
+            return null;
+        }
+
+        if (unacknowledgedRecords.get() > 0) {
+            // If we have records that haven't yet been acknowledged, we want 
to return null instead of running.
+            // We need to wait for the last results to complete before 
triggering the dataflow again.
+            return null;
+        }
+
+        logger.debug("Triggering dataflow");
+        final long start = System.nanoTime();
+
+        final DataflowTrigger trigger = dataflow.trigger();
+        final Optional<TriggerResult> resultOptional = 
trigger.getResult(timeoutMillis, TimeUnit.MILLISECONDS);
+        if (!resultOptional.isPresent()) {
+            logger.warn("Dataflow timed out after waiting {} milliseconds. 
Will cancel the execution.", timeoutMillis);
+            trigger.cancel();
+            return null;
+        }
+
+        triggerResult = resultOptional.get();
+
+        if (!triggerResult.isSuccessful()) {
+            logger.error("Dataflow {} failed to execute properly", 
dataflowName, triggerResult.getFailureCause().orElse(null));
+            trigger.cancel();
+            failureYieldExpiration = System.currentTimeMillis() + 1000L; // 
delay next execution for 1 second to avoid constantly failing and utilizing 
huge amounts of resources
+            return null;
+        }
+
+        // Verify that data was only transferred to the expected Output Port
+        verifyFlowFilesTransferredToProperPort(triggerResult, outputPortName, 
trigger);
+
+        final long nanos = System.nanoTime() - start;
+
+        final List<FlowFile> outputFlowFiles = 
triggerResult.getOutputFlowFiles(outputPortName);
+        final List<SourceRecord> sourceRecords = new 
ArrayList<>(outputFlowFiles.size());
+
+        Map<String, ?> componentState = 
dataflow.getComponentStates(Scope.CLUSTER);
+        final Map<String, ?> partitionMap;
+        if (componentState == null || componentState.isEmpty()) {
+            componentState = dataflow.getComponentStates(Scope.LOCAL);
+            partitionMap = localStatePartitionMap;
+        } else {
+            partitionMap = clusterStatePartitionMap;
+        }
+
+        for (final FlowFile flowFile : outputFlowFiles) {
+            final byte[] contents = triggerResult.readContent(flowFile);
+            final SourceRecord sourceRecord = createSourceRecord(flowFile, 
contents, componentState, partitionMap);
+            sourceRecords.add(sourceRecord);
+        }
+
+        logger.debug("Returning {} records from poll() method (took {} nanos 
to run dataflow)", sourceRecords.size(), nanos);
+
+        // If there is at least one record, we don't want to acknowledge the 
trigger result until Kafka has committed the Record.
+        // This is handled by incrementing the unacknowledgedRecords count. 
Then, Kafka Connect will call this.commitRecords().
+        // The commitRecords() call will then decrement the number of 
unacknowledgedRecords, and when all unacknowledged Records have been
+        // acknowledged, it will acknowledge the trigger result.
+        //
+        // However, if there are no records, this.commitRecords() will never 
be called. As a result, we need to ensure that we acknowledge the trigger 
result here.
+        if (sourceRecords.size() > 0) {
+            unacknowledgedRecords.addAndGet(sourceRecords.size());
+        } else {
+            triggerResult.acknowledge();
+        }
+
+        return sourceRecords;
+    }
+
+    private void verifyFlowFilesTransferredToProperPort(final TriggerResult 
triggerResult, final String expectedPortName, final DataflowTrigger trigger) {
+        final Map<String, List<FlowFile>> flowFileOutputMap = 
triggerResult.getOutputFlowFiles();
+
+        for (final Map.Entry<String, List<FlowFile>> entry : 
flowFileOutputMap.entrySet()) {
+            final String portName = entry.getKey();
+            final List<FlowFile> flowFiles = entry.getValue();
+
+            if (!flowFiles.isEmpty() && !expectedPortName.equals(portName)) {
+                logger.error("Dataflow transferred FlowFiles to Port {} but 
was expecting data to be transferred to {}. Rolling back session.", portName, 
expectedPortName);
+                trigger.cancel();
+                throw new RetriableException("Data was transferred to 
unexpected port. Expected: " + expectedPortName + ". Actual: " + portName);
+            }
+        }
+    }
+
+
+    private SourceRecord createSourceRecord(final FlowFile flowFile, final 
byte[] contents, final Map<String, ?> componentState, final Map<String, ?> 
partitionMap) {
+        final Schema valueSchema = (contents == null || contents.length == 0) 
? null : Schema.BYTES_SCHEMA;
+
+        // Kafka Connect currently gives us no way to determine the number of 
partitions that a given topic has.
+        // Therefore, we have no way to partition based on an attribute or 
anything like that, unless we left it up to
+        // the dataflow developer to know how many partitions exist a priori 
and explicitly set an attribute in the range of 0..max,
+        // but that is not a great solution. Kafka does support using a Simple 
Message Transform to change the partition of a given
+        // record, so that may be the best solution.
+        final Integer topicPartition = null;
+
+        final String topic;
+        if (topicNameAttribute == null) {
+            topic = topicName;
+        } else {
+            final String attributeValue = 
flowFile.getAttribute(topicNameAttribute);
+            topic = attributeValue == null ? topicName : attributeValue;
+        }
+
+        final ConnectHeaders headers = new ConnectHeaders();
+        if (headerAttributeNamePattern != null) {
+            // TODO: When we download/create the dataflow, create a hash of 
it. Then save that state. When we do it next time,
+            //       compare the hash to the last one. If changed, need to 
trigger connect framework to tell it that the config has changed.
+            //       Would be done via Source/Sink Context.
+            //       Or perhaps we should include the flow JSON itself in the 
configuration... would require that we string-ify the JSON though. This would 
be the cleanest, though. Would be optional.
+            //       We can just document that you either include it inline, 
or you don't make changes to the dataflow; instead, save as a separate dataflow 
and update task to point to the new one.
+
+            for (final Map.Entry<String, String> entry : 
flowFile.getAttributes().entrySet()) {
+                if 
(headerAttributeNamePattern.matcher(entry.getKey()).matches()) {
+                    final String headerName = entry.getKey();
+                    final String headerValue = entry.getValue();
+                    headers.add(headerName, headerValue, Schema.STRING_SCHEMA);
+                }
+            }
+        }
+
+        final Object key = keyAttributeName == null ? null : 
flowFile.getAttribute(keyAttributeName);
+        final Schema keySchema = key == null ? null : Schema.STRING_SCHEMA;
+        final Long timestamp = System.currentTimeMillis();
+
+        return new SourceRecord(partitionMap, componentState, topic, 
topicPartition, keySchema, key, valueSchema, contents, timestamp, headers);
+    }
+
+    @Override
+    public void commitRecord(final SourceRecord record, final RecordMetadata 
metadata) throws InterruptedException {
+        super.commitRecord(record, metadata);
+
+        final long unacked = unacknowledgedRecords.decrementAndGet();
+        logger.debug("SourceRecord {} committed; number of unacknowledged 
FlowFiles is now {}", record, unacked);
+
+        if (unacked < 1) {

Review comment:
       Since this is usually invoked from another thread, there can be an edge 
case where:
   
   - the unacknowledgedRecords is already decremented, making it 0, but the 
result is still not acknowledged
   - poll is invoked at this point on the task thread, and we trigger the 
dataflow while the previous result is still not acknowledged.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to