This is an automated email from the ASF dual-hosted git repository.
exceptionfactory pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/nifi.git
The following commit(s) were added to refs/heads/main by this push:
new f40205b536 NIFI-10502 Replaced "Flow File" with "FlowFile" across component descriptions
f40205b536 is described below
commit f40205b536dd99e6994cb7c13b7034585efc6e4a
Author: dan-s1 <[email protected]>
AuthorDate: Wed Jan 28 21:06:32 2026 +0000
NIFI-10502 Replaced "Flow File" with "FlowFile" across component descriptions
This closes #10824
Signed-off-by: David Handermann <[email protected]>
---
.../java/org/apache/nifi/util/FlowFilePackagerV2.java | 12 ++++++------
.../nifi/flowfile/attributes/FlowFileMediaType.java | 2 +-
.../flowfile/attributes/StandardFlowFileMediaType.java | 2 +-
.../nifi/processors/aws/kinesis/ConsumeKinesis.java | 16 ++++++++--------
.../additionalDetails.md | 2 +-
.../additionalDetails.md | 2 +-
.../nifi/processors/hadoop/AbstractPutHDFSRecord.java | 10 +++++-----
.../apache/nifi/processors/pgp/DecryptContentPGP.java | 2 +-
.../apache/nifi/processors/pgp/EncryptContentPGP.java | 2 +-
.../org/apache/nifi/processors/pgp/SignContentPGP.java | 2 +-
.../org/apache/nifi/processors/pgp/VerifyContentPGP.java | 2 +-
.../PythonControllerInteractionIT.java | 4 ++--
.../nifi/processors/standard/AbstractExecuteSQL.java | 15 ++++++++++-----
.../processors/standard/AbstractQueryDatabaseTable.java | 9 +++++++--
.../org/apache/nifi/processors/standard/ExecuteSQL.java | 16 ++++++++--------
.../nifi/processors/standard/ExecuteSQLRecord.java | 8 ++++----
.../apache/nifi/processors/standard/MonitorActivity.java | 2 +-
.../nifi/processors/standard/QueryDatabaseTable.java | 6 +++---
.../processors/standard/QueryDatabaseTableRecord.java | 12 ++++++------
.../standard/QueryDatabaseTableRecordTest.java | 7 ++++---
.../nifi/processors/standard/QueryDatabaseTableTest.java | 7 ++++---
.../apache/nifi/processors/standard/TestExecuteSQL.java | 1 +
.../nifi/processors/standard/TestExecuteSQLRecord.java | 1 +
.../nifi/controller/ControllerStatusReportingTask.java | 6 +++---
.../additionalDetails.md | 4 ++--
.../java/org/apache/nifi/web/api/dto/ControllerDTO.java | 12 ++++++------
.../java/org/apache/nifi/controller/AbstractPort.java | 2 +-
.../org/apache/nifi/controller/StandardFlowService.java | 2 +-
.../controller/status/history/NodeStatusDescriptor.java | 4 ++--
.../feature/jolt-transform-json-ui.component.html | 2 +-
30 files changed, 94 insertions(+), 80 deletions(-)
diff --git a/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV2.java b/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV2.java
index 3b8347ff8e..fb15fa82b3 100644
--- a/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV2.java
+++ b/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV2.java
@@ -29,12 +29,12 @@ import java.util.Map;
* </p>
*
* <pre>
- * Length Field : indicates the number of Flow File Attributes in the stream
- * 1 to N times (N=number of Flow File Attributes):
- * String Field : Flow File Attribute key name
- * String Field : Flow File Attribute value
- * Long : 8 bytes indicating the length of the Flow File content
- * Content : The next M bytes are the content of the Flow File.
+ * Length Field : indicates the number of FlowFile Attributes in the stream
+ * 1 to N times (N=number of FlowFile Attributes):
+ * String Field : FlowFile Attribute key name
+ * String Field : FlowFile Attribute value
+ * Long : 8 bytes indicating the length of the FlowFile content
+ * Content : The next M bytes are the content of the FlowFile.
* </pre>
*
* <pre>
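
The packaged layout described in the updated javadoc maps directly onto a sequence of stream writes. As a rough
illustration only (the actual FlowFilePackagerV2 defines its own field encodings), a writer for that layout could
look like the following, assuming 4-byte integer length prefixes and UTF-8 strings:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.Map;

    public class FlowFileLayoutSketch {

        // Writes attributes and content in the order the javadoc above describes.
        public static byte[] pack(final Map<String, String> attributes, final byte[] content) throws IOException {
            final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            final DataOutputStream out = new DataOutputStream(bytes);
            out.writeInt(attributes.size());            // Length Field: number of FlowFile Attributes
            for (final Map.Entry<String, String> entry : attributes.entrySet()) {
                writeString(out, entry.getKey());       // String Field: FlowFile Attribute key name
                writeString(out, entry.getValue());     // String Field: FlowFile Attribute value
            }
            out.writeLong(content.length);              // Long: 8 bytes for the length of the FlowFile content
            out.write(content);                         // Content: the M bytes of FlowFile content
            out.flush();
            return bytes.toByteArray();
        }

        // Assumed encoding for this sketch: 4-byte length prefix followed by UTF-8 bytes.
        private static void writeString(final DataOutputStream out, final String value) throws IOException {
            final byte[] encoded = value.getBytes(StandardCharsets.UTF_8);
            out.writeInt(encoded.length);
            out.write(encoded);
        }
    }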
diff --git a/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/FlowFileMediaType.java b/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/FlowFileMediaType.java
index 49009ef154..ea4a72cb99 100644
--- a/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/FlowFileMediaType.java
+++ b/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/FlowFileMediaType.java
@@ -17,7 +17,7 @@
package org.apache.nifi.flowfile.attributes;
/**
- * Flow File Media Type Definition
+ * FlowFile Media Type Definition
*/
public interface FlowFileMediaType {
/**
diff --git a/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/StandardFlowFileMediaType.java b/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/StandardFlowFileMediaType.java
index c4cd87ba70..15a6516381 100644
--- a/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/StandardFlowFileMediaType.java
+++ b/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/flowfile/attributes/StandardFlowFileMediaType.java
@@ -17,7 +17,7 @@
package org.apache.nifi.flowfile.attributes;
/**
- * Enumeration of standard Flow File Media Types
+ * Enumeration of standard FlowFile Media Types
*/
public enum StandardFlowFileMediaType implements FlowFileMediaType {
VERSION_1("application/flowfile-v1"),
diff --git a/nifi-extension-bundles/nifi-aws-bundle/nifi-aws-kinesis/src/main/java/org/apache/nifi/processors/aws/kinesis/ConsumeKinesis.java b/nifi-extension-bundles/nifi-aws-bundle/nifi-aws-kinesis/src/main/java/org/apache/nifi/processors/aws/kinesis/ConsumeKinesis.java
index 751ac6feef..4b9b857d8f 100644
--- a/nifi-extension-bundles/nifi-aws-bundle/nifi-aws-kinesis/src/main/java/org/apache/nifi/processors/aws/kinesis/ConsumeKinesis.java
+++ b/nifi-extension-bundles/nifi-aws-bundle/nifi-aws-kinesis/src/main/java/org/apache/nifi/processors/aws/kinesis/ConsumeKinesis.java
@@ -128,21 +128,21 @@ import static org.apache.nifi.processors.aws.region.RegionUtil.REGION;
        """)
@WritesAttributes({
        @WritesAttribute(attribute = ConsumeKinesisAttributes.STREAM_NAME,
-                description = "The name of the Kinesis Stream from which all Kinesis Records in the Flow File were read"),
+                description = "The name of the Kinesis Stream from which all Kinesis Records in the FlowFile were read"),
        @WritesAttribute(attribute = SHARD_ID,
-                description = "Shard ID from which all Kinesis Records in the Flow File were read"),
+                description = "Shard ID from which all Kinesis Records in the FlowFile were read"),
        @WritesAttribute(attribute = PARTITION_KEY,
-                description = "Partition key of the last Kinesis Record in the Flow File"),
+                description = "Partition key of the last Kinesis Record in the FlowFile"),
        @WritesAttribute(attribute = FIRST_SEQUENCE_NUMBER,
-                description = "A Sequence Number of the first Kinesis Record in the Flow File"),
+                description = "A Sequence Number of the first Kinesis Record in the FlowFile"),
        @WritesAttribute(attribute = FIRST_SUB_SEQUENCE_NUMBER,
-                description = "A SubSequence Number of the first Kinesis Record in the Flow File. Generated by KPL when aggregating records into a single Kinesis Record"),
+                description = "A SubSequence Number of the first Kinesis Record in the FlowFile. Generated by KPL when aggregating records into a single Kinesis Record"),
        @WritesAttribute(attribute = LAST_SEQUENCE_NUMBER,
-                description = "A Sequence Number of the last Kinesis Record in the Flow File"),
+                description = "A Sequence Number of the last Kinesis Record in the FlowFile"),
        @WritesAttribute(attribute = LAST_SUB_SEQUENCE_NUMBER,
-                description = "A SubSequence Number of the last Kinesis Record in the Flow File. Generated by KPL when aggregating records into a single Kinesis Record"),
+                description = "A SubSequence Number of the last Kinesis Record in the FlowFile. Generated by KPL when aggregating records into a single Kinesis Record"),
        @WritesAttribute(attribute = APPROXIMATE_ARRIVAL_TIMESTAMP,
-                description = "Approximate arrival timestamp of the last Kinesis Record in the Flow File"),
+                description = "Approximate arrival timestamp of the last Kinesis Record in the FlowFile"),
        @WritesAttribute(attribute = MIME_TYPE,
                description = "Sets the mime.type attribute to the MIME Type specified by the Record Writer (if configured)"),
        @WritesAttribute(attribute = RECORD_COUNT,
diff --git a/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumeIMAP/additionalDetails.md b/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumeIMAP/additionalDetails.md
index 1569576f47..f0ec2f028a 100644
--- a/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumeIMAP/additionalDetails.md
+++ b/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumeIMAP/additionalDetails.md
@@ -18,7 +18,7 @@
## Description:
This Processor consumes email messages via IMAP protocol and sends the content of an email message as content of the
-Flow File. Content of the incoming email message is written as raw bytes to the content of the outgoing Flow File.
+FlowFile. Content of the incoming email message is written as raw bytes to the content of the outgoing FlowFile.
Different email providers may require additional Java Mail properties which could be provided as dynamic properties. For
example, below is a sample configuration for GMail:
diff --git a/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumePOP3/additionalDetails.md b/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumePOP3/additionalDetails.md
index e7fb090493..e8dc97fb03 100644
--- a/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumePOP3/additionalDetails.md
+++ b/nifi-extension-bundles/nifi-email-bundle/nifi-email-processors/src/main/resources/docs/org.apache.nifi.processors.email.ConsumePOP3/additionalDetails.md
@@ -18,7 +18,7 @@
## Description:
This Processor consumes email messages via POP3 protocol and sends the content of an email message as content of the
-Flow File. Content of the incoming email message is written as raw bytes to the content of the outgoing Flow File.
+FlowFile. Content of the incoming email message is written as raw bytes to the content of the outgoing FlowFile.
Since different servers may require different Java Mail properties, such properties could be provided via dynamic
properties. For example, below is a sample configuration for GMail:
diff --git a/nifi-extension-bundles/nifi-extension-utils/nifi-record-utils/nifi-hadoop-record-utils/src/main/java/org/apache/nifi/processors/hadoop/AbstractPutHDFSRecord.java b/nifi-extension-bundles/nifi-extension-utils/nifi-record-utils/nifi-hadoop-record-utils/src/main/java/org/apache/nifi/processors/hadoop/AbstractPutHDFSRecord.java
index 7882297900..0d2f0f34da 100644
--- a/nifi-extension-bundles/nifi-extension-utils/nifi-record-utils/nifi-hadoop-record-utils/src/main/java/org/apache/nifi/processors/hadoop/AbstractPutHDFSRecord.java
+++ b/nifi-extension-bundles/nifi-extension-utils/nifi-record-utils/nifi-hadoop-record-utils/src/main/java/org/apache/nifi/processors/hadoop/AbstractPutHDFSRecord.java
@@ -116,17 +116,17 @@ public abstract class AbstractPutHDFSRecord extends AbstractHadoopProcessor {
    public static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
-            .description("Flow Files that have been successfully processed are transferred to this relationship")
+            .description("FlowFiles that have been successfully processed are transferred to this relationship")
            .build();
    public static final Relationship REL_RETRY = new Relationship.Builder()
            .name("retry")
-            .description("Flow Files that could not be processed due to issues that can be retried are transferred to this relationship")
+            .description("FlowFiles that could not be processed due to issues that can be retried are transferred to this relationship")
            .build();
    public static final Relationship REL_FAILURE = new Relationship.Builder()
            .name("failure")
-            .description("Flow Files that could not be processed due to issue that cannot be retried are transferred to this relationship")
+            .description("FlowFiles that could not be processed due to issues that cannot be retried are transferred to this relationship")
            .build();
public static final String RECORD_COUNT_ATTR = "record.count";
@@ -274,13 +274,13 @@ public abstract class AbstractPutHDFSRecord extends AbstractHadoopProcessor {
            // write to tempFile first and on success rename to destFile
            final Path tempFile = new Path(directoryPath, "." + filenameValue) {
                @Override
-                public FileSystem getFileSystem(Configuration conf) throws IOException {
+                public FileSystem getFileSystem(Configuration conf) {
                    return fileSystem;
                }
            };
            final Path destFile = new Path(directoryPath, filenameValue) {
                @Override
-                public FileSystem getFileSystem(Configuration conf) throws IOException {
+                public FileSystem getFileSystem(Configuration conf) {
                    return fileSystem;
                }
            };
diff --git a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/DecryptContentPGP.java b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/DecryptContentPGP.java
index 95bc5286fe..12ea63de21 100644
--- a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/DecryptContentPGP.java
+++ b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/DecryptContentPGP.java
@@ -164,7 +164,7 @@ public class DecryptContentPGP extends AbstractProcessor {
}
/**
- * On Trigger decrypts Flow File contents using configured properties
+ * On Trigger decrypts FlowFile contents using configured properties
*
* @param context Process Context
* @param session Process Session
diff --git a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/EncryptContentPGP.java b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/EncryptContentPGP.java
index 95521b78d5..98878a659d 100644
--- a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/EncryptContentPGP.java
+++ b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/EncryptContentPGP.java
@@ -181,7 +181,7 @@ public class EncryptContentPGP extends AbstractProcessor {
}
/**
- * On Trigger encrypts Flow File contents using configured properties
+ * On Trigger encrypts FlowFile contents using configured properties
*
* @param context Process Context
* @param session Process Session
diff --git a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/SignContentPGP.java b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/SignContentPGP.java
index 7c32ec72ca..7c8c6592b3 100644
--- a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/SignContentPGP.java
+++ b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/SignContentPGP.java
@@ -180,7 +180,7 @@ public class SignContentPGP extends AbstractProcessor {
}
/**
- * On Trigger generates signatures for Flow File contents using private keys
+ * On Trigger generates signatures for FlowFile contents using private keys
*
* @param context Process Context
* @param session Process Session
diff --git a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/VerifyContentPGP.java b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/VerifyContentPGP.java
index 4b662a48a3..c5d43c803f 100644
--- a/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/VerifyContentPGP.java
+++ b/nifi-extension-bundles/nifi-pgp-bundle/nifi-pgp-processors/src/main/java/org/apache/nifi/processors/pgp/VerifyContentPGP.java
@@ -132,7 +132,7 @@ public class VerifyContentPGP extends AbstractProcessor {
}
/**
- * On Trigger verifies signatures found in Flow File contents using configured properties
+ * On Trigger verifies signatures found in FlowFile contents using configured properties
*
* @param context Process Context
* @param session Process Session
diff --git a/nifi-extension-bundles/nifi-py4j-extension-bundle/nifi-py4j-integration-tests/src/test/java/org.apache.nifi.py4j/PythonControllerInteractionIT.java b/nifi-extension-bundles/nifi-py4j-extension-bundle/nifi-py4j-integration-tests/src/test/java/org.apache.nifi.py4j/PythonControllerInteractionIT.java
index 82260b0436..6884c03215 100644
--- a/nifi-extension-bundles/nifi-py4j-extension-bundle/nifi-py4j-integration-tests/src/test/java/org.apache.nifi.py4j/PythonControllerInteractionIT.java
+++ b/nifi-extension-bundles/nifi-py4j-extension-bundle/nifi-py4j-integration-tests/src/test/java/org.apache.nifi.py4j/PythonControllerInteractionIT.java
@@ -597,9 +597,9 @@ public class PythonControllerInteractionIT {
runner.assertTransferCount("large", 1);
runner.assertTransferCount("failure", 0);
        final FlowFile largeOutputFlowFile = runner.getFlowFilesForRelationship("large").getFirst();
-        assertEquals(largeInputFlowFile.getId(), largeOutputFlowFile.getId(), "Large Transformed Flow File should be the same as inbound");
+        assertEquals(largeInputFlowFile.getId(), largeOutputFlowFile.getId(), "Large Transformed FlowFile should be the same as inbound");
        final FlowFile smallOutputFlowFile = runner.getFlowFilesForRelationship("small").getFirst();
-        assertEquals(smallInputFlowFile.getId(), smallOutputFlowFile.getId(), "Small Transformed Flow File should be the same as inbound");
+        assertEquals(smallInputFlowFile.getId(), smallOutputFlowFile.getId(), "Small Transformed FlowFile should be the same as inbound");
}
@Test
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java
index f274c45274..8c0c8bd316 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java
@@ -81,6 +81,11 @@ public abstract class AbstractExecuteSQL extends AbstractProcessor {
            .name("failure")
            .description("SQL query execution failed. Incoming FlowFile will be penalized and routed to this relationship")
            .build();
+ static final List<String> OBSOLETE_MAX_ROWS_PER_FLOW_FILE = List.of(
+ "esql-max-rows",
+ "Max Rows Per Flow File"
+ );
+
protected Set<Relationship> relationships;
    public static final PropertyDescriptor DBCP_SERVICE = new PropertyDescriptor.Builder()
@@ -136,7 +141,7 @@ public abstract class AbstractExecuteSQL extends AbstractProcessor {
.build();
    public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder()
-            .name("Max Rows Per Flow File")
+            .name("Max Rows Per FlowFile")
            .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large "
                    + "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.")
.defaultValue("0")
@@ -213,15 +218,15 @@ public abstract class AbstractExecuteSQL extends AbstractProcessor {
config.renameProperty("sql-pre-query", SQL_PRE_QUERY.getName());
config.renameProperty("SQL select query", SQL_QUERY.getName());
config.renameProperty("sql-post-query", SQL_POST_QUERY.getName());
- config.renameProperty("esql-max-rows",
MAX_ROWS_PER_FLOW_FILE.getName());
config.renameProperty("esql-output-batch-size",
OUTPUT_BATCH_SIZE.getName());
config.renameProperty("esql-fetch-size", FETCH_SIZE.getName());
config.renameProperty("esql-auto-commit", AUTO_COMMIT.getName());
+ OBSOLETE_MAX_ROWS_PER_FLOW_FILE.forEach(obsoleteName ->
config.renameProperty(obsoleteName, MAX_ROWS_PER_FLOW_FILE.getName()));
}
@OnScheduled
public void setup(ProcessContext context) {
-        // If the query is not set, then an incoming flow file is needed. Otherwise fail the initialization
+        // If the query is not set, then an incoming flow file is needed. Otherwise, fail the initialization
        if (!context.getProperty(SQL_QUERY).isSet() && !context.hasIncomingConnection()) {
            final String errorString = "Either the Select Query must be specified or there must be an incoming connection "
+ "providing flowfile(s) containing a SQL select query";
@@ -295,7 +300,7 @@ public abstract class AbstractExecuteSQL extends AbstractProcessor {
}
st.setQueryTimeout(queryTimeout); // timeout in seconds
-            // Execute pre-query, throw exception and cleanup Flow Files if fail
+            // Execute pre-query, throw exception and cleanup FlowFiles if fail
            Pair<String, SQLException> failure = executeConfigStatements(con, preQueries);
            if (failure != null) {
                // In case of failure, assigning config query to "selectQuery" to follow current error handling
@@ -453,7 +458,7 @@ public abstract class AbstractExecuteSQL extends AbstractProcessor {
}
}
-            // Execute post-query, throw exception and cleanup Flow Files if fail
+            // Execute post-query, throw exception and cleanup FlowFiles if fail
failure = executeConfigStatements(con, postQueries);
if (failure != null) {
selectQuery = failure.getLeft();
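
A note on the migrateProperties changes above: they are what keep existing flows working across the rename. A flow
saved with an earlier NiFi version may reference this property by its old machine name ("esql-max-rows") or by its
old display name ("Max Rows Per Flow File"), so both obsolete names are folded into the new "Max Rows Per FlowFile"
name. Condensed from the class above (PropertyConfiguration.renameProperty is the only call involved):

    @Override
    public void migrateProperties(final PropertyConfiguration config) {
        // Whichever obsolete name a stored flow uses, rename it to the current descriptor name.
        OBSOLETE_MAX_ROWS_PER_FLOW_FILE.forEach(
                obsoleteName -> config.renameProperty(obsoleteName, MAX_ROWS_PER_FLOW_FILE.getName()));
    }

AbstractQueryDatabaseTable below applies the same pattern to its "qdbt-max-rows" and "Max Rows Per Flow File" names,
and the updated tests assert that each obsolete name maps to MAX_ROWS_PER_FLOW_FILE.getName().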
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java
index 48016979cb..21bfc24f8d 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java
@@ -76,6 +76,11 @@ public abstract class AbstractQueryDatabaseTable extends AbstractDatabaseFetchPr
public static final String RESULT_TABLENAME = "tablename";
public static final String RESULT_ROW_COUNT = "querydbtable.row.count";
+ static final List<String> OBSOLETE_MAX_ROWS_PER_FLOW_FILE = List.of(
+ "qdbt-max-rows",
+ "Max Rows Per Flow File"
+ );
+
    private static final AllowableValue TRANSACTION_READ_COMMITTED = new AllowableValue(
String.valueOf(Connection.TRANSACTION_READ_COMMITTED),
"TRANSACTION_READ_COMMITTED"
@@ -126,7 +131,7 @@ public abstract class AbstractQueryDatabaseTable extends AbstractDatabaseFetchPr
.build();
    public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder()
-            .name("Max Rows Per Flow File")
+            .name("Max Rows Per FlowFile")
            .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large "
                    + "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.")
.defaultValue("0")
@@ -526,7 +531,7 @@ public abstract class AbstractQueryDatabaseTable extends AbstractDatabaseFetchPr
@Override
public void migrateProperties(PropertyConfiguration config) {
super.migrateProperties(config);
- config.renameProperty("qdbt-max-rows",
MAX_ROWS_PER_FLOW_FILE.getName());
+ OBSOLETE_MAX_ROWS_PER_FLOW_FILE.forEach(obsoleteName ->
config.renameProperty(obsoleteName, MAX_ROWS_PER_FLOW_FILE.getName()));
config.renameProperty("qdbt-output-batch-size",
OUTPUT_BATCH_SIZE.getName());
config.renameProperty("qdbt-max-frags", MAX_FRAGMENTS.getName());
config.renameProperty("transaction-isolation-level",
TRANS_ISOLATION_LEVEL.getName());
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java
index b6523c5fe0..7466143474 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java
@@ -82,24 +82,24 @@ import static org.apache.nifi.util.db.JdbcProperties.USE_AVRO_LOGICAL_TYPES;
})
@WritesAttributes({
@WritesAttribute(attribute = "executesql.row.count", description =
"Contains the number of rows returned by the query. "
- + "If 'Max Rows Per Flow File' is set, then this number will
reflect the number of rows in the Flow File instead of the entire result set."),
+ + "If 'Max Rows Per FlowFile' is set, then this number will
reflect the number of rows in the FlowFile instead of the entire result set."),
@WritesAttribute(attribute = "executesql.query.duration", description
= "Combined duration of the query execution time and fetch time in
milliseconds. "
- + "If 'Max Rows Per Flow File' is set, then this number will
reflect only the fetch time for the rows in the Flow File instead of the entire
result set."),
+ + "If 'Max Rows Per FlowFile' is set, then this number will
reflect only the fetch time for the rows in the FlowFile instead of the entire
result set."),
@WritesAttribute(attribute = "executesql.query.executiontime",
description = "Duration of the query execution time in milliseconds. "
- + "This number will reflect the query execution time
regardless of the 'Max Rows Per Flow File' setting."),
+ + "This number will reflect the query execution time
regardless of the 'Max Rows Per FlowFile' setting."),
@WritesAttribute(attribute = "executesql.query.fetchtime", description
= "Duration of the result set fetch time in milliseconds. "
- + "If 'Max Rows Per Flow File' is set, then this number will
reflect only the fetch time for the rows in the Flow File instead of the entire
result set."),
+ + "If 'Max Rows Per FlowFile' is set, then this number will
reflect only the fetch time for the rows in the FlowFile instead of the entire
result set."),
@WritesAttribute(attribute = "executesql.resultset.index", description
= "Assuming multiple result sets are returned, "
+ "the zero based index of this result set."),
@WritesAttribute(attribute = "executesql.error.message", description =
"If processing an incoming flow file causes "
- + "an Exception, the Flow File is routed to failure and this
attribute is set to the exception message."),
- @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per Flow File' is set then all FlowFiles from the same query result
set "
+ + "an Exception, the FlowFile is routed to failure and this
attribute is set to the exception message."),
+ @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per FlowFile' is set then all FlowFiles from the same query result
set "
+ "will have the same value for the fragment.identifier
attribute. This can then be used to correlate the results."),
- @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per Flow File' is set then this is the total number of "
+ @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per FlowFile' is set then this is the total number of "
+ "FlowFiles produced by a single ResultSet. This can be used
in conjunction with the "
+ "fragment.identifier attribute in order to know how many
FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set,
then this "
+ "attribute will not be populated."),
- @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per Flow File' is set then the position of this FlowFile in the list of "
+ @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per FlowFile' is set then the position of this FlowFile in the list of "
+ "outgoing FlowFiles that were all derived from the same
result set FlowFile. This can be "
+ "used in conjunction with the fragment.identifier attribute
to know which FlowFiles originated from the same query result set and in what
order "
+ "FlowFiles were produced"),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java
index cbf91f940a..f34d4df059 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java
@@ -87,14 +87,14 @@ import static org.apache.nifi.util.db.JdbcProperties.USE_AVRO_LOGICAL_TYPES;
@WritesAttribute(attribute = "executesql.resultset.index", description
= "Assuming multiple result sets are returned, "
+ "the zero based index of this result set."),
@WritesAttribute(attribute = "executesql.error.message", description =
"If processing an incoming flow file causes "
- + "an Exception, the Flow File is routed to failure and this
attribute is set to the exception message."),
- @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per Flow File' is set then all FlowFiles from the same query result
set "
+ + "an Exception, the FlowFile is routed to failure and this
attribute is set to the exception message."),
+ @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per FlowFile' is set then all FlowFiles from the same query result
set "
+ "will have the same value for the fragment.identifier
attribute. This can then be used to correlate the results."),
- @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per Flow File' is set then this is the total number of "
+ @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per FlowFile' is set then this is the total number of "
+ "FlowFiles produced by a single ResultSet. This can be used
in conjunction with the "
+ "fragment.identifier attribute in order to know how many
FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set,
then this "
+ "attribute will not be populated."),
- @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per Flow File' is set then the position of this FlowFile in the list of "
+ @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per FlowFile' is set then the position of this FlowFile in the list of "
+ "outgoing FlowFiles that were all derived from the same
result set FlowFile. This can be "
+ "used in conjunction with the fragment.identifier attribute
to know which FlowFiles originated from the same query result set and in what
order "
+ "FlowFiles were produced"),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/MonitorActivity.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/MonitorActivity.java
index 12eb534899..df961a95e8 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/MonitorActivity.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/MonitorActivity.java
@@ -559,7 +559,7 @@ public class MonitorActivity extends AbstractProcessor {
public void update(FlowFile flowFile) {
final long now = nowMillis();
if ((now - this.getLastActivity()) > syncPeriodMillis) {
-            this.forceSync(); // Immediate synchronization if Flow Files are infrequent, to mitigate false reports
+            this.forceSync(); // Immediate synchronization if FlowFiles are infrequent, to mitigate false reports
}
this.lastSuccessfulTransfer = now;
if (saveAttributes) {
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java
index 4fe0fd9664..2d1a90e56f 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java
@@ -69,13 +69,13 @@ import static org.apache.nifi.util.db.JdbcProperties.VARIABLE_REGISTRY_ONLY_DEFA
@WritesAttributes({
@WritesAttribute(attribute = "tablename", description = "Name of the
table being queried"),
@WritesAttribute(attribute = "querydbtable.row.count", description =
"The number of rows selected by the query"),
- @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per Flow File' is set then all FlowFiles from the same query result
set "
+ @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per FlowFile' is set then all FlowFiles from the same query result
set "
+ "will have the same value for the fragment.identifier
attribute. This can then be used to correlate the results."),
- @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per Flow File' is set then this is the total number of "
+ @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per FlowFile' is set then this is the total number of "
+ "FlowFiles produced by a single ResultSet. This can be used
in conjunction with the "
+ "fragment.identifier attribute in order to know how many
FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set,
then this "
+ "attribute will not be populated."),
- @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per Flow File' is set then the position of this FlowFile in the list of "
+ @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per FlowFile' is set then the position of this FlowFile in the list of "
+ "outgoing FlowFiles that were all derived from the same
result set FlowFile. This can be "
+ "used in conjunction with the fragment.identifier attribute
to know which FlowFiles originated from the same query result set and in what
order "
+ "FlowFiles were produced"),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java
index 962b63e7d6..99ddf7cf1e 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java
@@ -73,13 +73,13 @@ import static org.apache.nifi.util.db.JdbcProperties.VARIABLE_REGISTRY_ONLY_DEFA
@WritesAttributes({
@WritesAttribute(attribute = "tablename", description = "Name of the
table being queried"),
@WritesAttribute(attribute = "querydbtable.row.count", description =
"The number of rows selected by the query"),
- @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per Flow File' is set then all FlowFiles from the same query result
set "
+ @WritesAttribute(attribute = "fragment.identifier", description = "If
'Max Rows Per FlowFile' is set then all FlowFiles from the same query result
set "
+ "will have the same value for the fragment.identifier
attribute. This can then be used to correlate the results."),
- @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per Flow File' is set then this is the total number of "
+ @WritesAttribute(attribute = "fragment.count", description = "If 'Max
Rows Per FlowFile' is set then this is the total number of "
+ "FlowFiles produced by a single ResultSet. This can be used
in conjunction with the "
+ "fragment.identifier attribute in order to know how many
FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set,
then this "
+ "attribute will not be populated."),
- @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per Flow File' is set then the position of this FlowFile in the list of "
+ @WritesAttribute(attribute = "fragment.index", description = "If 'Max
Rows Per FlowFile' is set then the position of this FlowFile in the list of "
+ "outgoing FlowFiles that were all derived from the same
result set FlowFile. This can be "
+ "used in conjunction with the fragment.identifier attribute
to know which FlowFiles originated from the same query result set and in what
order "
+ "FlowFiles were produced"),
@@ -105,7 +105,7 @@ import static org.apache.nifi.util.db.JdbcProperties.VARIABLE_REGISTRY_ONLY_DEFA
    an `id` column that is a one-up number, or a `last_modified` column that is a timestamp of when the row was last modified.
    Set the "Initial Load Strategy" property to "Start at Beginning".
    Set the "Fetch Size" to a number that avoids loading too much data into memory on the NiFi side. For example, a value of `1000` will load up to 1,000 rows of data.
-    Set the "Max Rows Per Flow File" to a value that allows efficient processing, such as `1000` or `10000`.
+    Set the "Max Rows Per FlowFile" to a value that allows efficient processing, such as `1000` or `10000`.
    Set the "Output Batch Size" property to a value greater than `0`. A smaller value, such as `1` or even `20` will result in lower latency but also slightly lower throughput.
    A larger value such as `1000` will result in higher throughput but also higher latency. It is not recommended to set the value larger than `1000` as it can cause significant
    memory utilization.
@@ -123,7 +123,7 @@ import static org.apache.nifi.util.db.JdbcProperties.VARIABLE_REGISTRY_ONLY_DEFA
    an `id` column that is a one-up number, or a `last_modified` column that is a timestamp of when the row was last modified.
    Set the "Initial Load Strategy" property to "Start at Current Maximum Values".
    Set the "Fetch Size" to a number that avoids loading too much data into memory on the NiFi side. For example, a value of `1000` will load up to 1,000 rows of data.
-    Set the "Max Rows Per Flow File" to a value that allows efficient processing, such as `1000` or `10000`.
+    Set the "Max Rows Per FlowFile" to a value that allows efficient processing, such as `1000` or `10000`.
    Set the "Output Batch Size" property to a value greater than `0`. A smaller value, such as `1` or even `20` will result in lower latency but also slightly lower throughput.
    A larger value such as `1000` will result in higher throughput but also higher latency. It is not recommended to set the value larger than `1000` as it can cause significant
    memory utilization.
@@ -159,7 +159,7 @@ import static org.apache.nifi.util.db.JdbcProperties.VARIABLE_REGISTRY_ONLY_DEFA
        an `id` column that is a one-up number, or a `last_modified` column that is a timestamp of when the row was last modified.
        Set the "Initial Load Strategy" property to "Start at Current Maximum Values".
        Set the "Fetch Size" to a number that avoids loading too much data into memory on the NiFi side. For example, a value of `1000` will load up to 1,000 rows of data.
-        Set the "Max Rows Per Flow File" to a value that allows efficient processing, such as `1000` or `10000`.
+        Set the "Max Rows Per FlowFile" to a value that allows efficient processing, such as `1000` or `10000`.
        Set the "Output Batch Size" property to a value greater than `0`. A smaller value, such as `1` or even `20` will result in lower latency but also slightly lower throughput.
        A larger value such as `1000` will result in higher throughput but also higher latency. It is not recommended to set the value larger than `1000` as it can cause significant
        memory utilization.
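
As a rough sketch of the tuning guidance above, the same properties can be configured against the processor in
NiFi's test harness. The processor class and property names come from this commit; the values are only the examples
the documentation suggests, not requirements:

    import org.apache.nifi.processors.standard.QueryDatabaseTableRecord;
    import org.apache.nifi.util.TestRunner;
    import org.apache.nifi.util.TestRunners;

    public class QueryDatabaseTableRecordTuningSketch {
        public static void main(final String[] args) {
            final TestRunner runner = TestRunners.newTestRunner(QueryDatabaseTableRecord.class);
            runner.setProperty("Fetch Size", "1000");             // rows fetched per database round trip
            runner.setProperty("Max Rows Per FlowFile", "10000"); // cap on rows written to a single FlowFile
            runner.setProperty("Output Batch Size", "20");        // small batches favor latency over throughput
        }
    }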
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java
index cc95b3705a..fe51135654 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java
@@ -126,7 +126,7 @@ class QueryDatabaseTableRecordTest extends AbstractDatabaseConnectionServiceTest
        runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0);
        runner.clearTransferState();
-        //Remove Max Rows Per Flow File
+        //Remove Max Rows Per FlowFile
        runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0");
        // Add a new row with a higher ID and run, one flowfile with one new row should be transferred
@@ -802,7 +802,7 @@ class QueryDatabaseTableRecordTest extends AbstractDatabaseConnectionServiceTest
        runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0);
        runner.clearTransferState();
-        //Remove Max Rows Per Flow File
+        //Remove Max Rows Per FlowFile
        runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0");
        // Add a new row with a higher ID and run, one flowfile with one new row should be transferred
@@ -928,7 +928,7 @@ class QueryDatabaseTableRecordTest extends AbstractDatabaseConnectionServiceTest
        runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0);
        runner.clearTransferState();
-        //Remove Max Rows Per Flow File
+        //Remove Max Rows Per FlowFile
        runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0");
        // Add a new row with a higher ID and run, one flowfile with one new row should be transferred
@@ -1048,6 +1048,7 @@ class QueryDatabaseTableRecordTest extends AbstractDatabaseConnectionServiceTest
                Map.entry("db-fetch-where-clause", AbstractDatabaseFetchProcessor.WHERE_CLAUSE.getName()),
                Map.entry("db-fetch-sql-query", AbstractDatabaseFetchProcessor.SQL_QUERY.getName()),
                Map.entry("qdbt-max-rows", AbstractQueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE.getName()),
+                Map.entry("Max Rows Per Flow File", AbstractQueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE.getName()),
                Map.entry("qdbt-output-batch-size", AbstractQueryDatabaseTable.OUTPUT_BATCH_SIZE.getName()),
                Map.entry("qdbt-max-frags", AbstractQueryDatabaseTable.MAX_FRAGMENTS.getName()),
                Map.entry("transaction-isolation-level", AbstractQueryDatabaseTable.TRANS_ISOLATION_LEVEL.getName()),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java
index 0002781a11..4dc93d70a1 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java
@@ -122,7 +122,7 @@ public class QueryDatabaseTableTest extends AbstractDatabaseConnectionServiceTes
        runner.assertAllFlowFilesTransferred(QueryDatabaseTable.REL_SUCCESS, 0);
        runner.clearTransferState();
-        //Remove Max Rows Per Flow File
+        //Remove Max Rows Per FlowFile
        runner.setProperty(QueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE, "0");
        // Add a new row with a higher ID and run, one flowfile with one new row should be transferred
@@ -830,7 +830,7 @@ public class QueryDatabaseTableTest extends AbstractDatabaseConnectionServiceTes
        runner.assertAllFlowFilesTransferred(QueryDatabaseTable.REL_SUCCESS, 0);
        runner.clearTransferState();
-        //Remove Max Rows Per Flow File
+        //Remove Max Rows Per FlowFile
        runner.setProperty(QueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE, "0");
        // Add a new row with a higher ID and run, one flowfile with one new row should be transferred
@@ -958,7 +958,7 @@ public class QueryDatabaseTableTest extends AbstractDatabaseConnectionServiceTes
        runner.assertAllFlowFilesTransferred(QueryDatabaseTable.REL_SUCCESS, 0);
        runner.clearTransferState();
-        //Remove Max Rows Per Flow File
+        //Remove Max Rows Per FlowFile
        runner.setProperty(QueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE, "0");
        // Add a new row with a higher ID and run, one flowfile with one new row should be transferred
@@ -1079,6 +1079,7 @@ public class QueryDatabaseTableTest extends AbstractDatabaseConnectionServiceTes
                Map.entry("db-fetch-where-clause", AbstractDatabaseFetchProcessor.WHERE_CLAUSE.getName()),
                Map.entry("db-fetch-sql-query", AbstractDatabaseFetchProcessor.SQL_QUERY.getName()),
                Map.entry("qdbt-max-rows", AbstractQueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE.getName()),
+                Map.entry("Max Rows Per Flow File", AbstractQueryDatabaseTable.MAX_ROWS_PER_FLOW_FILE.getName()),
                Map.entry("qdbt-output-batch-size", AbstractQueryDatabaseTable.OUTPUT_BATCH_SIZE.getName()),
                Map.entry("qdbt-max-frags", AbstractQueryDatabaseTable.MAX_FRAGMENTS.getName()),
                Map.entry("transaction-isolation-level", AbstractQueryDatabaseTable.TRANS_ISOLATION_LEVEL.getName()),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java
index 77f4f5af73..d1ecd89ae0 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java
@@ -627,6 +627,7 @@ public class TestExecuteSQL extends AbstractDatabaseConnectionServiceTest {
                Map.entry("SQL select query", AbstractExecuteSQL.SQL_QUERY.getName()),
                Map.entry("sql-post-query", AbstractExecuteSQL.SQL_POST_QUERY.getName()),
                Map.entry("esql-max-rows", AbstractExecuteSQL.MAX_ROWS_PER_FLOW_FILE.getName()),
+                Map.entry("Max Rows Per Flow File", AbstractExecuteSQL.MAX_ROWS_PER_FLOW_FILE.getName()),
                Map.entry("esql-output-batch-size", AbstractExecuteSQL.OUTPUT_BATCH_SIZE.getName()),
                Map.entry("esql-fetch-size", AbstractExecuteSQL.FETCH_SIZE.getName()),
                Map.entry("esql-auto-commit", AbstractExecuteSQL.AUTO_COMMIT.getName()),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java
index ea9d57c756..8063e4a66a 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java
@@ -663,6 +663,7 @@ class TestExecuteSQLRecord extends AbstractDatabaseConnectionServiceTest {
                Map.entry("SQL select query", AbstractExecuteSQL.SQL_QUERY.getName()),
                Map.entry("sql-post-query", AbstractExecuteSQL.SQL_POST_QUERY.getName()),
                Map.entry("esql-max-rows", AbstractExecuteSQL.MAX_ROWS_PER_FLOW_FILE.getName()),
+                Map.entry("Max Rows Per Flow File", AbstractExecuteSQL.MAX_ROWS_PER_FLOW_FILE.getName()),
                Map.entry("esql-output-batch-size", AbstractExecuteSQL.OUTPUT_BATCH_SIZE.getName()),
                Map.entry("esql-fetch-size", AbstractExecuteSQL.FETCH_SIZE.getName()),
                Map.entry("esql-auto-commit", AbstractExecuteSQL.AUTO_COMMIT.getName()),
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/java/org/apache/nifi/controller/ControllerStatusReportingTask.java b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/java/org/apache/nifi/controller/ControllerStatusReportingTask.java
index 53b96db3f8..f0d662f3a4 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/java/org/apache/nifi/controller/ControllerStatusReportingTask.java
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/java/org/apache/nifi/controller/ControllerStatusReportingTask.java
@@ -111,12 +111,12 @@ public class ControllerStatusReportingTask extends AbstractReportingTask {
        final boolean showDeltas = context.getProperty(SHOW_DELTAS).asBoolean();
        connectionLineFormat = showDeltas ? CONNECTION_LINE_FORMAT_WITH_DELTA : CONNECTION_LINE_FORMAT_NO_DELTA;
-        connectionHeader = String.format(connectionLineFormat, "Connection ID", "Source", "Connection Name", "Destination", "Flow Files In", "Flow Files Out", "FlowFiles Queued");
+        connectionHeader = String.format(connectionLineFormat, "Connection ID", "Source", "Connection Name", "Destination", "FlowFiles In", "FlowFiles Out", "FlowFiles Queued");
        connectionBorderLine = createLine(connectionHeader);
        processorLineFormat = showDeltas ? PROCESSOR_LINE_FORMAT_WITH_DELTA : PROCESSOR_LINE_FORMAT_NO_DELTA;
-        processorHeader = String.format(processorLineFormat, "Processor Name", "Processor ID", "Processor Type", "Run Status", "Flow Files In",
-                "Flow Files Out", "Bytes Read", "Bytes Written", "Tasks", "Proc Time");
+        processorHeader = String.format(processorLineFormat, "Processor Name", "Processor ID", "Processor Type", "Run Status", "FlowFiles In",
+                "FlowFiles Out", "Bytes Read", "Bytes Written", "Tasks", "Proc Time");
        processorBorderLine = createLine(processorHeader);
        counterHeader = String.format(COUNTER_LINE_FORMAT, "Counter Context", "Counter Name", "Counter Value");
diff --git a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/resources/docs/org.apache.nifi.controller.ControllerStatusReportingTask/additionalDetails.md b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/resources/docs/org.apache.nifi.controller.ControllerStatusReportingTask/additionalDetails.md
index 5d5eec384c..6c2758dca7 100644
--- a/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/resources/docs/org.apache.nifi.controller.ControllerStatusReportingTask/additionalDetails.md
+++ b/nifi-extension-bundles/nifi-standard-bundle/nifi-standard-reporting-tasks/src/main/resources/docs/org.apache.nifi.controller.ControllerStatusReportingTask/additionalDetails.md
@@ -24,7 +24,7 @@ following information is included (sorted by descending Processing Timing):
* Processor ID
* Processor Type
* Run Status
-* Flow Files In (5 mins)
+* FlowFiles In (5 mins)
* FlowFiles Out (5 mins)
* Bytes Read from Disk (5 mins)
* Bytes Written to Disk (5 mins)
@@ -37,7 +37,7 @@ For Connections, the following information is included (sorted by descending siz
* Connection ID
* Source Component Name
* Destination Component Name
-* Flow Files In (5 mins)
+* FlowFiles In (5 mins)
* FlowFiles Out (5 mins)
* FlowFiles Queued
diff --git a/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ControllerDTO.java b/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ControllerDTO.java
index 878e94ec27..935772807e 100644
--- a/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ControllerDTO.java
+++ b/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ControllerDTO.java
@@ -129,11 +129,11 @@ public class ControllerDTO {
}
/**
- * The Socket Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote instances, this will be null.
+ * The Socket Port on which this instance is listening for Remote Transfers of FlowFiles. If this instance is not configured to receive FlowFiles from remote instances, this will be null.
 *
- * @return a integer between 1 and 65535, or null, if not configured for remote transfer
+ * @return an integer between 1 and 65535, or null, if not configured for remote transfer
 */
-    @Schema(description = "The Socket Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote "
+    @Schema(description = "The Socket Port on which this instance is listening for Remote Transfers of FlowFiles. If this instance is not configured to receive FlowFiles from remote "
+ "instances, this will be null."
)
public Integer getRemoteSiteListeningPort() {
@@ -145,11 +145,11 @@ public class ControllerDTO {
}
/**
- * The HTTP(S) Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote instances, this will be null.
+ * The HTTP(S) Port on which this instance is listening for Remote Transfers of FlowFiles. If this instance is not configured to receive FlowFiles from remote instances, this will be null.
 *
- * @return a integer between 1 and 65535, or null, if not configured for remote transfer
+ * @return an integer between 1 and 65535, or null, if not configured for remote transfer
 */
-    @Schema(description = "The HTTP(S) Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote "
+    @Schema(description = "The HTTP(S) Port on which this instance is listening for Remote Transfers of FlowFiles. If this instance is not configured to receive FlowFiles from remote "
+ "instances, this will be null."
)
public Integer getRemoteSiteHttpListeningPort() {
diff --git a/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/AbstractPort.java b/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/AbstractPort.java
index ae6f8f9071..4edb4b8e2c 100644
--- a/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/AbstractPort.java
+++ b/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/AbstractPort.java
@@ -62,7 +62,7 @@ public abstract class AbstractPort implements Port {
    private static final Logger logger = LoggerFactory.getLogger(AbstractPort.class);
    public static final Relationship PORT_RELATIONSHIP = new Relationship.Builder()
-        .description("The relationship through which all Flow Files are transferred")
+        .description("The relationship through which all FlowFiles are transferred")
.name("")
.build();
diff --git a/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java b/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java
index 5de3486164..7755030322 100644
--- a/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java
+++ b/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java
@@ -398,7 +398,7 @@ public class StandardFlowService implements FlowService, ProtocolHandler {
                    } catch (InterruptedException e) {
                        throw new ProtocolException("Could not complete offload request", e);
}
- }, "Offload Flow Files from Node");
+ }, "Offload FlowFiles from Node");
t.setDaemon(true);
t.start();
diff --git a/nifi-framework-bundle/nifi-framework/nifi-framework-status-history-shared/src/main/java/org/apache/nifi/controller/status/history/NodeStatusDescriptor.java b/nifi-framework-bundle/nifi-framework/nifi-framework-status-history-shared/src/main/java/org/apache/nifi/controller/status/history/NodeStatusDescriptor.java
index 739b04b2ab..cae700a18a 100644
--- a/nifi-framework-bundle/nifi-framework/nifi-framework-status-history-shared/src/main/java/org/apache/nifi/controller/status/history/NodeStatusDescriptor.java
+++ b/nifi-framework-bundle/nifi-framework/nifi-framework-status-history-shared/src/main/java/org/apache/nifi/controller/status/history/NodeStatusDescriptor.java
@@ -100,13 +100,13 @@ public enum NodeStatusDescriptor {
NodeStatus::getTimerDrivenThreads),
FLOW_FILE_REPOSITORY_FREE_SPACE(
"flowFileRepositoryFreeSpace",
- "Flow File Repository Free Space",
+ "FlowFile Repository Free Space",
"The usable space available for file repositories on the
underlying storage mechanism",
MetricDescriptor.Formatter.DATA_SIZE,
NodeStatus::getFlowFileRepositoryFreeSpace),
FLOW_FILE_REPOSITORY_USED_SPACE(
"flowFileRepositoryUsedSpace",
- "Flow File Repository Used Space",
+ "FlowFile Repository Used Space",
"The space in use on the underlying storage mechanism.",
MetricDescriptor.Formatter.DATA_SIZE,
NodeStatus::getFlowFileRepositoryUsedSpace),
diff --git a/nifi-frontend/src/main/frontend/apps/nifi-jolt-transform-ui/src/app/pages/jolt-transform-json-ui/feature/jolt-transform-json-ui.component.html b/nifi-frontend/src/main/frontend/apps/nifi-jolt-transform-ui/src/app/pages/jolt-transform-json-ui/feature/jolt-transform-json-ui.component.html
index e503e77d53..64975f6b0f 100644
--- a/nifi-frontend/src/main/frontend/apps/nifi-jolt-transform-ui/src/app/pages/jolt-transform-json-ui/feature/jolt-transform-json-ui.component.html
+++ b/nifi-frontend/src/main/frontend/apps/nifi-jolt-transform-ui/src/app/pages/jolt-transform-json-ui/feature/jolt-transform-json-ui.component.html
@@ -206,7 +206,7 @@
<div class="w-1/3 pl-2 pr-2 flex-1 flex-col">
<mat-card class="h-full">
<mat-card-header>
- <mat-card-title>Flow File Input</mat-card-title>
+ <mat-card-title>FlowFile Input</mat-card-title>
<mat-card-subtitle>
<div class="overflow-ellipsis overflow-hidden
whitespace-nowrap">
<i