This is an automated email from the ASF dual-hosted git repository.

nfilotto pushed a commit to branch CAMEL-17792/doc-message-headers
in repository https://gitbox.apache.org/repos/asf/camel.git

commit e9b7397df92b6ee84f29feb00aeaa8984bde204e
Author: Nicolas Filotto <[email protected]>
AuthorDate: Wed Mar 23 12:07:48 2022 +0100

    CAMEL-17792: Add doc about the message headers of camel-hdfs
---
 .../org/apache/camel/component/hdfs/hdfs.json        |  8 ++++++++
 .../camel-hdfs/src/main/docs/hdfs-component.adoc     | 20 +++-----------------
 .../apache/camel/component/hdfs/HdfsConstants.java   | 18 +++++++++++++++++-
 .../apache/camel/component/hdfs/HdfsConsumer.java    | 10 +++++-----
 .../apache/camel/component/hdfs/HdfsEndpoint.java    |  2 +-
 .../apache/camel/component/hdfs/HdfsProducer.java    |  6 +++---
 6 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json b/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json
index ec213ee..398f723 100644
--- a/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json
+++ b/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json
@@ -28,6 +28,14 @@
     "jAASConfiguration": { "kind": "property", "displayName": 
"JAASConfiguration", "group": "security", "label": "security", "required": 
false, "type": "object", "javaType": "javax.security.auth.login.Configuration", 
"deprecated": false, "deprecationNote": "", "autowired": false, "secret": 
false, "description": "To use the given configuration for security with JAAS." 
},
     "kerberosConfigFile": { "kind": "property", "displayName": "Kerberos 
Config File", "group": "security", "label": "security", "required": false, 
"type": "string", "javaType": "java.lang.String", "deprecated": false, 
"deprecationNote": "", "autowired": false, "secret": false, "description": "To 
use kerberos authentication, set the value of the 'java.security.krb5.conf' 
environment variable to an existing file. If the environment variable is 
already set, warn if different than the speci [...]
   },
+  "headers": {
+    "CamelHdfsClose": { "kind": "header", "displayName": "", "group": 
"producer", "label": "producer", "required": false, "javaType": "Boolean", 
"deprecated": false, "deprecationNote": "", "autowired": false, "secret": 
false, "description": "Indicates to close the stream" },
+    "CamelFileName": { "kind": "header", "displayName": "", "group": "common", 
"label": "", "required": false, "javaType": "String", "deprecated": false, 
"deprecationNote": "", "autowired": false, "secret": false, "description": 
"(producer) Specifies the name of the file to write (relative to the\nendpoint 
path). The name can be a `String` or an\nExpression object. Only relevant when 
not using a\nsplit strategy. (consumer) Specifies the name of the file to read" 
},
+    "CamelFileNameConsumed": { "kind": "header", "displayName": "", "group": 
"consumer", "label": "consumer", "required": false, "javaType": "String", 
"deprecated": false, "deprecationNote": "", "autowired": false, "secret": 
false, "description": "The name of the file consumed" },
+    "CamelFileAbsolutePath": { "kind": "header", "displayName": "", "group": 
"consumer", "label": "consumer", "required": false, "javaType": "String", 
"deprecated": false, "deprecationNote": "", "autowired": false, "secret": 
false, "description": "The absolute path of the file" },
+    "KEY": { "kind": "header", "displayName": "", "group": "common", "label": 
"", "required": false, "javaType": "Object", "deprecated": false, 
"deprecationNote": "", "autowired": false, "secret": false, "description": "The 
HDFS key" },
+    "CamelFileLength": { "kind": "header", "displayName": "", "group": 
"consumer", "label": "consumer", "required": false, "javaType": "Long", 
"deprecated": false, "deprecationNote": "", "autowired": false, "secret": 
false, "description": "The size of the file" }
+  },
   "properties": {
     "hostName": { "kind": "path", "displayName": "Host Name", "group": 
"common", "label": "", "required": true, "type": "string", "javaType": 
"java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": 
false, "secret": false, "configurationClass": 
"org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": 
"config", "description": "HDFS host to use" },
     "port": { "kind": "path", "displayName": "Port", "group": "common", 
"label": "", "required": false, "type": "integer", "javaType": "int", 
"deprecated": false, "autowired": false, "secret": false, "defaultValue": 8020, 
"configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", 
"configurationField": "config", "description": "HDFS port to use" },
diff --git a/components/camel-hdfs/src/main/docs/hdfs-component.adoc b/components/camel-hdfs/src/main/docs/hdfs-component.adoc
index b98164a..0b13918 100644
--- a/components/camel-hdfs/src/main/docs/hdfs-component.adoc
+++ b/components/camel-hdfs/src/main/docs/hdfs-component.adoc
@@ -65,7 +65,9 @@ include::partial$component-endpoint-options.adoc[]
 
 // endpoint options: END
 
-
+// component headers: START
+include::partial$component-endpoint-headers.adoc[]
+// component headers: END
 
 == KeyType and ValueType
 
@@ -122,22 +124,6 @@ than 1 second or if more than 5 bytes have been written. So, running
 `hadoop fs -ls /tmp/simple-file` you'll see that multiple files have
 been created.
 
-== Message Headers
-
-The following headers are supported by this component:
-
-=== Producer only
-
-[width="100%",cols="10%,90%",options="header",]
-|=======================================================================
-|Header |Description
-
-|`CamelFileName` |Specifies the name of the file to write (relative to the
-endpoint path). The name can be a `String` or an
-Expression object. Only relevant when not using a
-split strategy.
-|=======================================================================
-
 == Controlling to close file stream
 
 When using the xref:hdfs-component.adoc[HDFS] producer *without* a split
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java
index 3572ffc..a2a4e30 100644
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java
+++ b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java
@@ -16,6 +16,8 @@
  */
 package org.apache.camel.component.hdfs;
 
+import org.apache.camel.Exchange;
+import org.apache.camel.spi.Metadata;
 import org.apache.hadoop.io.SequenceFile;
 
 public final class HdfsConstants {
@@ -43,10 +45,24 @@ public final class HdfsConstants {
     public static final String DEFAULT_PATTERN = "*";
 
     public static final int DEFAULT_CHECK_IDLE_INTERVAL = 500;
-
+    @Metadata(label = "producer", description = "Indicates to close the 
stream", javaType = "Boolean")
     public static final String HDFS_CLOSE = "CamelHdfsClose";
 
     public static final int DEFAULT_MAX_MESSAGES_PER_POLL = 100;
+    @Metadata(description = "(producer) Specifies the name of the file to 
write (relative to the\n" +
+                            "endpoint path). The name can be a `String` or 
an\n" +
+                            "Expression object. Only relevant when not using 
a\n" +
+                            "split strategy. (consumer) Specifies the name of 
the file to read",
+              javaType = "String")
+    public static final String FILE_NAME = Exchange.FILE_NAME;
+    @Metadata(label = "consumer", description = "The name of the file 
consumed", javaType = "String")
+    public static final String FILE_NAME_CONSUMED = 
Exchange.FILE_NAME_CONSUMED;
+    @Metadata(label = "consumer", description = "The absolute path of the 
file", javaType = "String")
+    public static final String FILE_ABSOLUTE_PATH = "CamelFileAbsolutePath";
+    @Metadata(description = "The HDFS key", javaType = "Object")
+    public static final String KEY = HdfsHeader.KEY.name();
+    @Metadata(label = "consumer", description = "The size of the file", 
javaType = "Long")
+    public static final String FILE_LENGTH = Exchange.FILE_LENGTH;
 
     private HdfsConstants() {
     }
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java
index 1e38fc6..5c909c5 100644
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java
+++ b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java
@@ -184,15 +184,15 @@ public final class HdfsConsumer extends ScheduledPollConsumer {
         try {
             Message message = exchange.getIn();
             String fileName = 
StringUtils.substringAfterLast(hdfsFile.getActualPath(), "/");
-            message.setHeader(Exchange.FILE_NAME, fileName);
-            message.setHeader(Exchange.FILE_NAME_CONSUMED, fileName);
-            message.setHeader("CamelFileAbsolutePath", 
hdfsFile.getActualPath());
+            message.setHeader(HdfsConstants.FILE_NAME, fileName);
+            message.setHeader(HdfsConstants.FILE_NAME_CONSUMED, fileName);
+            message.setHeader(HdfsConstants.FILE_ABSOLUTE_PATH, 
hdfsFile.getActualPath());
             if (key.getValue() != null) {
-                message.setHeader(HdfsHeader.KEY.name(), key.getValue());
+                message.setHeader(HdfsConstants.KEY, key.getValue());
             }
 
             if (hdfsFile.getNumOfReadBytes() >= 0) {
-                message.setHeader(Exchange.FILE_LENGTH, 
hdfsFile.getNumOfReadBytes());
+                message.setHeader(HdfsConstants.FILE_LENGTH, 
hdfsFile.getNumOfReadBytes());
             }
 
             message.setBody(value.getValue());
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java
index 0a84783..f99614d4 100644
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java
+++ b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java
@@ -31,7 +31,7 @@ import org.apache.camel.support.ScheduledPollEndpoint;
  * Read and write from/to an HDFS filesystem using Hadoop 2.x.
  */
 @UriEndpoint(firstVersion = "2.14.0", scheme = "hdfs", title = "HDFS", syntax 
= "hdfs:hostName:port/path",
-             category = { Category.BIGDATA, Category.HADOOP, Category.FILE })
+             category = { Category.BIGDATA, Category.HADOOP, Category.FILE }, 
headersClass = HdfsConstants.class)
 public class HdfsEndpoint extends ScheduledPollEndpoint {
 
     @UriParam
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java
index f829034..8439075 100644
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java
+++ b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java
@@ -193,11 +193,11 @@ public class HdfsProducer extends DefaultProducer {
 
     void doProcess(Exchange exchange) throws IOException {
         Object body = exchange.getIn().getBody();
-        Object key = exchange.getIn().getHeader(HdfsHeader.KEY.name());
+        Object key = exchange.getIn().getHeader(HdfsConstants.KEY);
 
         HdfsInfoFactory hdfsInfoFactory = new HdfsInfoFactory(config);
         // if an explicit filename is specified, close any existing stream and 
append the filename to the hdfsPath
-        if (exchange.getIn().getHeader(Exchange.FILE_NAME) != null) {
+        if (exchange.getIn().getHeader(HdfsConstants.FILE_NAME) != null) {
             if (oStream != null) {
                 IOHelper.close(oStream, "output stream", LOG);
             }
@@ -250,7 +250,7 @@ public class HdfsProducer extends DefaultProducer {
     private StringBuilder getHdfsPathUsingFileNameHeader(Exchange exchange) {
         StringBuilder actualPath = new StringBuilder(hdfsPath);
         String fileName = "";
-        Object value = exchange.getIn().getHeader(Exchange.FILE_NAME);
+        Object value = exchange.getIn().getHeader(HdfsConstants.FILE_NAME);
         if (value instanceof String) {
             fileName = 
exchange.getContext().getTypeConverter().convertTo(String.class, exchange, 
value);
         } else if (value instanceof Expression) {

Reply via email to