exceptionfactory commented on a change in pull request #4646:
URL: https://github.com/apache/nifi/pull/4646#discussion_r529080745



##########
File path: nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestDeduplicateRecords.java
##########
@@ -0,0 +1,318 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.standard;
+
+import org.apache.nifi.controller.AbstractControllerService;
+import org.apache.nifi.distributed.cache.client.Deserializer;
+import org.apache.nifi.distributed.cache.client.DistributedMapCacheClient;
+import org.apache.nifi.distributed.cache.client.Serializer;
+import org.apache.nifi.reporting.InitializationException;
+import org.apache.nifi.serialization.record.MockRecordParser;
+import org.apache.nifi.serialization.record.MockRecordWriter;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.util.MockFlowFile;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestDeduplicateRecords {
+
+    private TestRunner runner;
+    private MockRecordParser reader;
+    private MockRecordWriter writer;
+
+    @BeforeClass
+    public static void beforeClass() {
+        System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "info");
+        System.setProperty("org.slf4j.simpleLogger.showDateTime", "true");
+        System.setProperty("org.slf4j.simpleLogger.log.nifi.io.nio", "debug");
+        System.setProperty("org.slf4j.simpleLogger.log.nifi.processors.standard.DeduplicateRecords", "debug");
+        System.setProperty("org.slf4j.simpleLogger.log.nifi.processors.standard.TestDeduplicateRecords", "debug");
+    }
+
+    @Before
+    public void setup() throws InitializationException {
+        runner = TestRunners.newTestRunner(DeduplicateRecords.class);
+
+        // RECORD_READER, RECORD_WRITER
+        reader = new MockRecordParser();
+        writer = new MockRecordWriter("header", false);
+
+        runner.addControllerService("reader", reader);
+        runner.enableControllerService(reader);
+        runner.addControllerService("writer", writer);
+        runner.enableControllerService(writer);
+
+        runner.setProperty(DeduplicateRecords.RECORD_READER, "reader");
+        runner.setProperty(DeduplicateRecords.RECORD_WRITER, "writer");
+
+        reader.addSchemaField("firstName", RecordFieldType.STRING);
+        reader.addSchemaField("middleName", RecordFieldType.STRING);
+        reader.addSchemaField("lastName", RecordFieldType.STRING);
+
+        // INCLUDE_ZERO_RECORD_FLOWFILES
+        runner.setProperty(DeduplicateRecords.INCLUDE_ZERO_RECORD_FLOWFILES, "true");
+
+        // CACHE_ENTRY_IDENTIFIER
+
+        runner.assertValid();
+    }
+
+    void commonEnqueue() {
+        final Map<String, String> props = new HashMap<>();
+        props.put("hash.value", "1000");
+        runner.enqueue(new byte[]{}, props);
+    }
+
+    @Test
+    public void testDetectDuplicatesHashSet() {
+        commonEnqueue();
+
+        runner.setProperty(DeduplicateRecords.FILTER_TYPE, DeduplicateRecords.HASH_SET_VALUE);
+        runner.setProperty("/middleName", "${field.value}");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("Jane", "X", "Doe");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 2, 1);
+    }
+
+    @Test
+    public void testDetectDuplicatesBloomFilter() {
+        commonEnqueue();
+        runner.setProperty(DeduplicateRecords.FILTER_TYPE, DeduplicateRecords.BLOOM_FILTER_VALUE);
+        runner.setProperty(DeduplicateRecords.BLOOM_FILTER_FPP, "0.10");
+        runner.setProperty("/middleName", "${field.value}");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("Jane", "X", "Doe");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 2, 1);
+    }
+
+    @Test
+    public void testNoDuplicatesHashSet() {
+        commonEnqueue();
+        runner.setProperty(DeduplicateRecords.FILTER_TYPE, DeduplicateRecords.HASH_SET_VALUE);
+        runner.setProperty("/middleName", "${field.value}");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("Jack", "Z", "Brown");
+        reader.addRecord("Jane", "X", "Doe");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 3, 0);
+    }
+
+    @Test
+    public void testNoDuplicatesBloomFilter() {
+        commonEnqueue();
+        runner.setProperty(DeduplicateRecords.FILTER_TYPE, DeduplicateRecords.BLOOM_FILTER_VALUE);
+        runner.setProperty(DeduplicateRecords.BLOOM_FILTER_FPP, "0.10");
+        runner.setProperty("/middleName", "${field.value}");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("Jack", "Z", "Brown");
+        reader.addRecord("Jane", "X", "Doe");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 3, 0);
+    }
+
+    @Test
+    public void testAllDuplicates() {
+        commonEnqueue();
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("John", "Q", "Smith");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 1, 2);
+    }
+
+    @Test
+    public void testAllUnique() {
+        commonEnqueue();
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("Jack", "Z", "Brown");
+        reader.addRecord("Jane", "X", "Doe");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 3, 0);
+    }
+
+    @Test
+    public void testCacheValueFromRecordPath() {
+        commonEnqueue();
+        reader.addRecord("John", "Q", "Smith");
+        reader.addRecord("Jack", "Z", "Brown");
+        reader.addRecord("Jack", "Z", "Brown");
+
+        runner.enqueue("");
+        runner.run();
+
+        doCountTests(0, 1, 1, 1, 2, 1);
+
+//        cache.assertContains("KEY", "VALUE"); // TODO: Get the tests running so you can see what the key/value is in serialized form

Review comment:
       Should this comment be retained, or is the purpose to implement cache checking in a future test?
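       If cache checking is implemented in a later revision, a minimal sketch of such an assertion (names are illustrative; it assumes the MockCacheService used in the DMC tests below exposes the standard DistributedMapCacheClient API and that a string key Serializer is available):

           // containsKey(key, keySerializer) is declared on DistributedMapCacheClient.
           final DistributedMapCacheClient cache = new MockCacheService<>();
           // ... wire "cache" into the runner as the map cache client and run the processor ...
           assertTrue(cache.containsKey("expected-serialized-key", keySerializer));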

##########
File path: nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/DeduplicateRecords.java
##########
@@ -0,0 +1,675 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.standard;
+
+import com.google.common.base.Joiner;
+import com.google.common.hash.BloomFilter;
+import com.google.common.hash.Funnels;
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.codec.digest.MessageDigestAlgorithms;
+import org.apache.nifi.annotation.behavior.DynamicProperty;
+import org.apache.nifi.annotation.behavior.EventDriven;
+import org.apache.nifi.annotation.behavior.InputRequirement;
+import org.apache.nifi.annotation.behavior.InputRequirement.Requirement;
+import org.apache.nifi.annotation.behavior.SupportsBatching;
+import org.apache.nifi.annotation.behavior.SystemResource;
+import org.apache.nifi.annotation.behavior.SystemResourceConsideration;
+import org.apache.nifi.annotation.behavior.WritesAttribute;
+import org.apache.nifi.annotation.documentation.CapabilityDescription;
+import org.apache.nifi.annotation.documentation.SeeAlso;
+import org.apache.nifi.annotation.documentation.Tags;
+import org.apache.nifi.annotation.lifecycle.OnScheduled;
+import org.apache.nifi.components.AllowableValue;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.components.PropertyValue;
+import org.apache.nifi.components.ValidationContext;
+import org.apache.nifi.components.ValidationResult;
+import org.apache.nifi.components.Validator;
+import org.apache.nifi.distributed.cache.client.DistributedMapCacheClient;
+import org.apache.nifi.distributed.cache.client.Serializer;
+import org.apache.nifi.expression.ExpressionLanguageScope;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.flowfile.attributes.CoreAttributes;
+import org.apache.nifi.logging.ComponentLog;
+import org.apache.nifi.processor.AbstractProcessor;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.ProcessorInitializationContext;
+import org.apache.nifi.processor.Relationship;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.record.path.FieldValue;
+import org.apache.nifi.record.path.RecordPath;
+import org.apache.nifi.record.path.RecordPathResult;
+import org.apache.nifi.record.path.util.RecordPathCache;
+import org.apache.nifi.record.path.validation.RecordPathPropertyNameValidator;
+import org.apache.nifi.record.path.validation.RecordPathValidator;
+import org.apache.nifi.serialization.RecordReader;
+import org.apache.nifi.serialization.RecordReaderFactory;
+import org.apache.nifi.serialization.RecordSetWriter;
+import org.apache.nifi.serialization.RecordSetWriterFactory;
+import org.apache.nifi.serialization.WriteResult;
+import org.apache.nifi.serialization.record.Record;
+import org.apache.nifi.serialization.record.RecordSchema;
+import org.apache.nifi.serialization.record.util.DataTypeUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Serializable;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.util.stream.Collectors.toList;
+import static org.apache.commons.codec.binary.StringUtils.getBytesUtf8;
+
+@EventDriven
+@SupportsBatching
+@InputRequirement(Requirement.INPUT_REQUIRED)
+@SystemResourceConsideration(resource = SystemResource.MEMORY,
+    description = "The HashSet filter type will grow memory space proportionate to the number of unique records processed. " +
+        "The BloomFilter type will use constant memory regardless of the number of records processed.")
+@Tags({"text", "record", "update", "change", "replace", "modify", "distinct", 
"unique",
+    "filter", "hash", "dupe", "duplicate", "dedupe"})
+@CapabilityDescription("This processor attempts to deduplicate a record set in 
memory using either a hashset or a bloom filter. " +
+        "It operates on a per-file basis rather than across an entire data set 
that spans multiple files.")
+@WritesAttribute(attribute = "record.count", description = "The number of 
records processed.")
+@DynamicProperty(
+    name = "RecordPath",
+    value = "An expression language statement used to determine how the 
RecordPath is resolved. " +
+            "The following variables are availble: ${field.name}, 
${field.value}, ${field.type}",
+    description = "The name of each user-defined property must be a valid 
RecordPath.")
+@SeeAlso(classNames = {
+    "org.apache.nifi.distributed.cache.client.DistributedMapCacheClientService",
+    "org.apache.nifi.distributed.cache.server.map.DistributedMapCacheServer",
+    "org.apache.nifi.processors.standard.DetectDuplicate"
+})
+public class DeduplicateRecords extends AbstractProcessor {
+
+    private static final String FIELD_NAME = "field.name";
+    private static final String FIELD_VALUE = "field.value";
+    private static final String FIELD_TYPE = "field.type";
+
+    private volatile RecordPathCache recordPathCache;
+    private volatile List<String> recordPaths;
+
+    // VALUES
+
+    static final AllowableValue NONE_ALGORITHM_VALUE = new AllowableValue("none", "None",
+            "Do not use a hashing algorithm. The value of resolved RecordPaths will be combined with tildes (~) to form the unique record key. " +
+                    "This may use significantly more storage depending on the size and shape of your data.");
+    static final AllowableValue MD5_ALGORITHM_VALUE = new AllowableValue(MessageDigestAlgorithms.MD5, "MD5",
+            "The MD5 message-digest algorithm.");
+    static final AllowableValue SHA1_ALGORITHM_VALUE = new AllowableValue(MessageDigestAlgorithms.SHA_1, "SHA-1",
+            "The SHA-1 cryptographic hash algorithm.");
+    static final AllowableValue SHA256_ALGORITHM_VALUE = new AllowableValue(MessageDigestAlgorithms.SHA3_256, "SHA-256",
+            "The SHA-256 cryptographic hash algorithm.");
+    static final AllowableValue SHA512_ALGORITHM_VALUE = new AllowableValue(MessageDigestAlgorithms.SHA3_512, "SHA-512",
+            "The SHA-512 cryptographic hash algorithm.");
+
+    static final AllowableValue HASH_SET_VALUE = new AllowableValue("hash-set", "HashSet",
+            "Exactly matches records seen before with 100% accuracy at the expense of more storage usage. " +
+                    "Stores the filter data in a single cache entry in the distributed cache, and is loaded entirely into memory during duplicate detection. " +
+                    "This filter is preferred for small to medium data sets and offers high performance, as it is loaded into memory when this processor is running.");
+    static final AllowableValue BLOOM_FILTER_VALUE = new AllowableValue("bloom-filter", "BloomFilter",
+            "Space-efficient data structure ideal for large data sets using probability to determine if a record was seen previously. " +
+                    "False positive matches are possible, but false negatives are not – in other words, a query returns either \"possibly in the set\" or \"definitely not in the set\". " +
+                    "You should use this option if the FlowFile content is large and you can tolerate some duplication in the data. Uses constant storage space regardless of the record set size.");
+
+    // PROPERTIES
+
+    static final PropertyDescriptor RECORD_READER = new PropertyDescriptor.Builder()
+            .name("record-reader")
+            .displayName("Record Reader")
+            .description("Specifies the Controller Service to use for reading incoming data")
+            .identifiesControllerService(RecordReaderFactory.class)
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor RECORD_WRITER = new PropertyDescriptor.Builder()
+            .name("record-writer")
+            .displayName("Record Writer")
+            .description("Specifies the Controller Service to use for writing out the records")
+            .identifiesControllerService(RecordSetWriterFactory.class)
+            .required(true)
+            .build();
+
+    static final AllowableValue OPTION_SINGLE_FILE = new AllowableValue("single", "Single File");
+    static final AllowableValue OPTION_DATA_LAKE = new AllowableValue("dataLake", "Data Lake");
+
+    static final PropertyDescriptor DEDUPLICATION_STRATEGY = new PropertyDescriptor.Builder()
+            .name("deduplication-strategy")
+            .displayName("Deduplication Strategy")
+            .description("The strategy to use for detecting and isolating duplicate records. The option for doing it " +
+                    "across a single data file will operate in memory, whereas the one for going across the entire data lake " +
+                    "will require a distributed map cache.")
+            .allowableValues(OPTION_SINGLE_FILE, OPTION_DATA_LAKE)
+            .defaultValue(OPTION_SINGLE_FILE.getValue())
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor DISTRIBUTED_MAP_CACHE = new PropertyDescriptor.Builder()
+            .name("distributed-map-cache")
+            .displayName("Distributed Map Cache client")
+            .description("This configuration is required when the deduplication strategy is set to 'across data lake.' The map " +
+                    "cache will be used to check a data source such as HBase or Redis for entries indicating that a record has " +
+                    "been processed before. This option requires a downstream process that uses PutDistributedMapCache to write " +
+                    "an entry to the cache data source once the record has been processed to indicate that it has been handled before.")
+            .identifiesControllerService(DistributedMapCacheClient.class)
+            .required(false)
+            .addValidator(Validator.VALID)
+            .build();
+
+    static final PropertyDescriptor CACHE_IDENTIFIER = new PropertyDescriptor.Builder()
+            .name("cache-identifier")
+            .displayName("Cache Identifier")
+            .description("This option defines a record path operation to use for defining the cache identifier. It can be used " +
+                    "in addition to the hash settings. This field will have the expression language attribute \"record.hash.value\" " +
+                    "available to it for generating the record path operation.")
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .required(false)
+            .addValidator(Validator.VALID)
+            .build();
+
+    static final PropertyDescriptor INCLUDE_ZERO_RECORD_FLOWFILES = new PropertyDescriptor.Builder()
+            .name("include-zero-record-flowfiles")
+            .displayName("Include Zero Record FlowFiles")
+            .description("When converting an incoming FlowFile, if the conversion results in no data, "
+                    + "this property specifies whether or not a FlowFile will be sent to the corresponding relationship")
+            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
+            .allowableValues("true", "false")
+            .defaultValue("true")
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor RECORD_HASHING_ALGORITHM = new PropertyDescriptor.Builder()
+            .name("record-hashing-algorithm")
+            .displayName("Record Hashing Algorithm")
+            .description("The algorithm used to hash the combined set of resolved RecordPath values for cache storage.")
+            .allowableValues(
+                    NONE_ALGORITHM_VALUE,
+                    MD5_ALGORITHM_VALUE,
+                    SHA1_ALGORITHM_VALUE,
+                    SHA256_ALGORITHM_VALUE,
+                    SHA512_ALGORITHM_VALUE
+            )
+            .defaultValue(SHA1_ALGORITHM_VALUE.getValue())
Review comment:
       Is there a reason for making SHA-1 the default value? The [NIST Policy on Hash Functions](https://csrc.nist.gov/Projects/Hash-Functions/NIST-Policy-on-Hash-Functions) and other sources recommend against using SHA-1 for security purposes. Although this processor may not be strictly targeted for security purposes, defaulting to SHA-256 seems like a better option.
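       A minimal sketch of that change (as an aside, the quoted SHA256_ALGORITHM_VALUE is currently built from MessageDigestAlgorithms.SHA3_256 even though it is labeled "SHA-256"; MessageDigestAlgorithms.SHA_256 would make the label and the algorithm agree):

           // Default to SHA-256 rather than SHA-1.
           static final AllowableValue SHA256_ALGORITHM_VALUE = new AllowableValue(MessageDigestAlgorithms.SHA_256, "SHA-256",
                   "The SHA-256 cryptographic hash algorithm.");
           // ...
           .defaultValue(SHA256_ALGORITHM_VALUE.getValue())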

##########
File path: nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/DeduplicateRecords.java
##########
@@ -0,0 +1,675 @@
+            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor FILTER_TYPE = new PropertyDescriptor.Builder()
+            .name("filter-type")
+            .displayName("Filter Type")
+            .description("The filter used to determine whether a record has been seen before based on the matching RecordPath criteria.")
+            .allowableValues(
+                    HASH_SET_VALUE,
+                    BLOOM_FILTER_VALUE
+            )
+            .defaultValue(HASH_SET_VALUE.getValue())
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor FILTER_CAPACITY_HINT = new PropertyDescriptor.Builder()
+            .name("filter-capacity-hint")
+            .displayName("Filter Capacity Hint")
+            .description("An estimation of the total number of unique records to be processed. " +
+                    "The more accurate this estimate is, the fewer false positives a BloomFilter will yield.")
+            .defaultValue("25000")
+            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
+            .addValidator(StandardValidators.INTEGER_VALIDATOR)
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor BLOOM_FILTER_FPP = new PropertyDescriptor.Builder()
+            .name("bloom-filter-certainty")
+            .displayName("Bloom Filter Certainty")
+            .description("The desired false positive probability when using the BloomFilter type. " +
+                    "Using a value of .05, for example, allows a five-percent probability that the result is a false positive. " +
+                    "The closer to 0 this value is set, the more precise the result, at the expense of more storage space utilization.")
+            .defaultValue("0.10")
+            .expressionLanguageSupported(ExpressionLanguageScope.NONE)
+            .addValidator(StandardValidators.NUMBER_VALIDATOR)
+            .required(false)
+            .build();
+
+
+    // RELATIONSHIPS
+
+    static final Relationship REL_DUPLICATE = new Relationship.Builder()
+            .name("duplicate")
+            .description("Records detected as duplicates in the FlowFile content will be routed to this relationship")
+            .build();
+
+    static final Relationship REL_NON_DUPLICATE = new Relationship.Builder()
+            .name("non-duplicate")
+            .description("If the record was not found in the cache, it will be routed to this relationship")
+            .build();
+
+    static final Relationship REL_ORIGINAL = new Relationship.Builder()
+            .name("original")
+            .description("The original input FlowFile is sent to this relationship unless there is a fatal error in the processing.")
+            .build();
+
+    static final Relationship REL_FAILURE = new Relationship.Builder()
+            .name("failure")
+            .description("If unable to communicate with the cache, the FlowFile will be penalized and routed to this relationship")
+            .build();
+
+    private List<PropertyDescriptor> descriptors;
+
+    private Set<Relationship> relationships;
+
+    @Override
+    protected void init(final ProcessorInitializationContext context) {
+        final List<PropertyDescriptor> descriptors = new ArrayList<>();
+        descriptors.add(DEDUPLICATION_STRATEGY);
+        descriptors.add(DISTRIBUTED_MAP_CACHE);
+        descriptors.add(CACHE_IDENTIFIER);
+        descriptors.add(RECORD_READER);
+        descriptors.add(RECORD_WRITER);
+        descriptors.add(INCLUDE_ZERO_RECORD_FLOWFILES);
+        descriptors.add(RECORD_HASHING_ALGORITHM);
+        descriptors.add(FILTER_TYPE);
+        descriptors.add(FILTER_CAPACITY_HINT);
+        descriptors.add(BLOOM_FILTER_FPP);
+        this.descriptors = Collections.unmodifiableList(descriptors);
+
+        final Set<Relationship> relationships = new HashSet<>();
+        relationships.add(REL_DUPLICATE);
+        relationships.add(REL_NON_DUPLICATE);
+        relationships.add(REL_ORIGINAL);
+        relationships.add(REL_FAILURE);
+        this.relationships = Collections.unmodifiableSet(relationships);
+    }
+
+    @Override
+    public Set<Relationship> getRelationships() {
+        return this.relationships;
+    }
+
+    @Override
+    public final List<PropertyDescriptor> getSupportedPropertyDescriptors() {
+        return descriptors;
+    }
+
+    @Override
+    protected PropertyDescriptor getSupportedDynamicPropertyDescriptor(final String propertyDescriptorName) {
+        return new PropertyDescriptor.Builder()
+                .name(propertyDescriptorName)
+                .description("Specifies a value to use from the record that matches the RecordPath: '" +
+                        propertyDescriptorName + "' which is used together with other specified " +
+                        "record path values to determine the uniqueness of a record. " +
+                        "Expression Language may reference variables 'field.name', 'field.type', and 'field.value' " +
+                        "to access information about the field and the value of the field being evaluated.")
+                .required(false)
+                .dynamic(true)
+                .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+                .addValidator(new RecordPathPropertyNameValidator())
+                .build();
+    }
+
+    @Override
+    protected Collection<ValidationResult> customValidate(final ValidationContext context) {
+        RecordPathValidator recordPathValidator = new RecordPathValidator();
+        final List<ValidationResult> validationResults = context.getProperties().keySet().stream()
+                .filter(PropertyDescriptor::isDynamic)
+                .map(property -> recordPathValidator.validate(
+                        "User-defined Properties",
+                        property.getName(),
+                        context
+                )).collect(Collectors.toList());
+
+        boolean useSingleFile = context.getProperty(DEDUPLICATION_STRATEGY).getValue().equals(OPTION_SINGLE_FILE.getValue());
+
+        if (useSingleFile && context.getProperty(BLOOM_FILTER_FPP).isSet()) {
+            final double falsePositiveProbability = context.getProperty(BLOOM_FILTER_FPP).asDouble();
+            if (falsePositiveProbability < 0 || falsePositiveProbability > 1) {
+                validationResults.add(
+                        new ValidationResult.Builder()
+                                .subject(BLOOM_FILTER_FPP.getName() + " out of range.")
+                                .input(String.valueOf(falsePositiveProbability))
+                                .explanation("Valid values are 0.0 - 1.0 inclusive")
+                                .valid(false).build());
+            }
+        } else if (!useSingleFile) {
+            if (!context.getProperty(DISTRIBUTED_MAP_CACHE).isSet()) {
+                validationResults.add(new ValidationResult.Builder()
+                        .subject(DISTRIBUTED_MAP_CACHE.getName())
+                        .explanation("Data lake-level deduplication was chosen, but a distributed map cache client was " +
+                                "not configured")
+                        .valid(false).build());
+            }
+        }
+
+        return validationResults;
+    }
+
+    private DistributedMapCacheClient mapCacheClient;
+    private RecordReaderFactory readerFactory;
+    private RecordSetWriterFactory writerFactory;
+
+    private boolean useInMemoryStrategy;
+
+    @OnScheduled
+    public void compileRecordPaths(final ProcessContext context) {
+        final List<String> recordPaths = new ArrayList<>();
+
+        recordPaths.addAll(context.getProperties().keySet().stream()
+                .filter(PropertyDescriptor::isDynamic)
+                .map(PropertyDescriptor::getName)
+                .collect(toList()));
+
+        recordPathCache = new RecordPathCache(recordPaths.size());
+        this.recordPaths = recordPaths;
+
+        if (context.getProperty(DISTRIBUTED_MAP_CACHE).isSet()) {
+            mapCacheClient = context.getProperty(DISTRIBUTED_MAP_CACHE).asControllerService(DistributedMapCacheClient.class);
+        }
+
+        readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
+        writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
+
+        useInMemoryStrategy = context.getProperty(DEDUPLICATION_STRATEGY).getValue().equals(OPTION_SINGLE_FILE.getValue());
+    }
+
+    private FilterWrapper getFilter(ProcessContext context) {
+        if (useInMemoryStrategy) {
+            boolean useHashSet = context.getProperty(FILTER_TYPE).getValue()
+                    .equals(HASH_SET_VALUE.getValue());
+            final int filterCapacity = context.getProperty(FILTER_CAPACITY_HINT).asInteger();
+            return useHashSet
+                ? new HashSetFilterWrapper(new HashSet<>(filterCapacity))
+                : new BloomFilterWrapper(BloomFilter.create(
+                    Funnels.stringFunnel(Charset.defaultCharset()),
+                    filterCapacity,
+                    context.getProperty(BLOOM_FILTER_FPP).asDouble()
+                ));
+        } else {
+            return new DistributedMapCacheClientWrapper(mapCacheClient);
+        }
+    }
+
+    @Override
+    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
+        final FlowFile flowFile = session.get();
+        if (flowFile == null) {
+            return;
+        }
+
+        final ComponentLog logger = getLogger();
+
+        FlowFile nonDuplicatesFlowFile = session.create(flowFile);
+        FlowFile duplicatesFlowFile = session.create(flowFile);
+
+        try {
+            final long now = System.currentTimeMillis();
+
+            final FilterWrapper filter = getFilter(context);
+
+            final String recordHashingAlgorithm = context.getProperty(RECORD_HASHING_ALGORITHM).getValue();
+            final MessageDigest messageDigest = recordHashingAlgorithm.equals(NONE_ALGORITHM_VALUE.getValue())
+                    ? null
+                    : DigestUtils.getDigest(recordHashingAlgorithm);
+            final boolean matchWholeRecord = context.getProperties().keySet().stream().noneMatch(PropertyDescriptor::isDynamic);
+
+            final InputStream inputStream = session.read(flowFile);
+
+            final RecordReader reader = readerFactory.createRecordReader(flowFile, inputStream, logger);
+
+            final RecordSchema writeSchema = writerFactory.getSchema(flowFile.getAttributes(), reader.getSchema());
+
+            final OutputStream nonDupeStream = session.write(nonDuplicatesFlowFile);
+            final OutputStream dupeStream = session.write(duplicatesFlowFile);
+
+            final RecordSetWriter nonDuplicatesWriter = writerFactory.createWriter(getLogger(), writeSchema, nonDupeStream, nonDuplicatesFlowFile);
+            final RecordSetWriter duplicatesWriter = writerFactory.createWriter(getLogger(), writeSchema, dupeStream, duplicatesFlowFile);
+
+            nonDuplicatesWriter.beginRecordSet();
+            duplicatesWriter.beginRecordSet();
+            Record record;
+
+            long index = 0;
+            while ((record = reader.nextRecord()) != null) {
+                String recordValue;
+
+                if (matchWholeRecord) {
+                    recordValue = Joiner.on('~').join(record.getValues());

Review comment:
       Recommend declaring a static class variable for the joining character, since it is used in multiple places.
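       A minimal sketch of that suggestion (the constant name is illustrative):

           // Single definition of the character used to join RecordPath values into a record key.
           private static final char JOIN_CHAR = '~';
           // ...
           recordValue = Joiner.on(JOIN_CHAR).join(record.getValues());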

##########
File path: nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestDeduplicateRecords.java
##########
@@ -0,0 +1,318 @@
+    /*
+     * These are all related to NIFI-6014
+     */
+
+    @Test
+    public void testDataLakeDeduplicationRequiresDMC() {
+        runner.setProperty(DeduplicateRecords.DEDUPLICATION_STRATEGY, DeduplicateRecords.OPTION_DATA_LAKE.getValue());
+        runner.assertNotValid();
+    }
+
+    @Test
+    public void testDeduplicateWithDMC() throws Exception {
+        DistributedMapCacheClient dmc = new MockCacheService<>();
+        runner.addControllerService("dmc", dmc);
+        runner.setProperty(DeduplicateRecords.DISTRIBUTED_MAP_CACHE, "dmc");
+        runner.setProperty(DeduplicateRecords.DEDUPLICATION_STRATEGY, DeduplicateRecords.OPTION_DATA_LAKE.getValue());
+        runner.enableControllerService(dmc);
+        runner.assertValid();
+
+        dmc.put("2875ba79836587028a920875a18ee5dceb837587", true, null, null);
+        dmc.put("6eeba6ecf9d263582f463890be339dbecbaf23c8", true, null, null);

Review comment:
       Recommend declaring static variables for these identifiers so that they can be reused in the following test method.
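       A minimal sketch of that suggestion (the constant names are illustrative):

           private static final String FIRST_KEY = "2875ba79836587028a920875a18ee5dceb837587";
           private static final String SECOND_KEY = "6eeba6ecf9d263582f463890be339dbecbaf23c8";
           // ...
           dmc.put(FIRST_KEY, true, null, null);
           dmc.put(SECOND_KEY, true, null, null);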




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

