This is an automated email from the ASF dual-hosted git repository.
davsclaus pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-kafka-connector.git
The following commit(s) were added to refs/heads/main by this push:
new ed622b4f16 Rrf (#1621)
ed622b4f16 is described below
commit ed622b4f16a92958bd05f1d26039158bbf3e40df
Author: Claus Ibsen <[email protected]>
AuthorDate: Mon May 6 18:34:39 2024 +0200
Rrf (#1621)
* #1618: Block atlassian maven repo so build is faster
* Regen
* Regen
---
.mvn/maven.config | 2 +
.mvn/rrf/groupId-B_shibboleth.txt | 3 +
.mvn/rrf/groupId-atlassian.txt | 7 +
.../camel-azure-storage-files-sink-sink.json | 45 ++++++
.../camel-azure-storage-files-source-source.json | 66 +++++++++
...picurio-registry-not-secured-source-source.json | 106 +++++++++++++++
...-batch-azure-schema-registry-source-source.json | 125 +++++++++++++++++
...amel-kafka-batch-not-secured-source-source.json | 86 ++++++++++++
.../camel-kafka-batch-scram-source-source.json | 112 +++++++++++++++
.../camel-kafka-batch-source-source.json | 112 +++++++++++++++
.../camel-kafka-batch-ssl-source-source.json | 151 +++++++++++++++++++++
.../connectors/camel-snowflake-sink-sink.json | 43 ++++++
.../connectors/camel-snowflake-source-source.json | 56 ++++++++
.../camel-spring-rabbitmq-sink-sink.json | 55 ++++++++
.../camel-spring-rabbitmq-source-source.json | 62 +++++++++
.../camel-postgresql-sink-kafka-connector/pom.xml | 2 +-
.../pom.xml | 2 +-
17 files changed, 1033 insertions(+), 2 deletions(-)
diff --git a/.mvn/maven.config b/.mvn/maven.config
new file mode 100644
index 0000000000..7408098dbe
--- /dev/null
+++ b/.mvn/maven.config
@@ -0,0 +1,2 @@
+-Daether.remoteRepositoryFilter.groupId=true
+-Daether.remoteRepositoryFilter.groupId.basedir=${session.rootDirectory}/.mvn/rrf/
diff --git a/.mvn/rrf/groupId-B_shibboleth.txt b/.mvn/rrf/groupId-B_shibboleth.txt
new file mode 100644
index 0000000000..0a64483308
--- /dev/null
+++ b/.mvn/rrf/groupId-B_shibboleth.txt
@@ -0,0 +1,3 @@
+net.shibboleth
+net.shibboleth.utilities
+org.opensaml
diff --git a/.mvn/rrf/groupId-atlassian.txt b/.mvn/rrf/groupId-atlassian.txt
new file mode 100644
index 0000000000..45b48708ed
--- /dev/null
+++ b/.mvn/rrf/groupId-atlassian.txt
@@ -0,0 +1,7 @@
+com.atlassian.event
+com.atlassian.httpclient
+com.atlassian.jira
+com.atlassian.platform
+com.atlassian.pom
+com.atlassian.sal
+io.atlassian.fugue
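
The two flags in .mvn/maven.config above turn on Maven Resolver's remote
repository filtering by groupId. Each groupId-<repositoryId>.txt file under
.mvn/rrf/ lists the only groupIds that the repository with that id is asked
for, so resolution skips the slow Shibboleth and Atlassian repositories for
every other artifact, which is what makes the build faster. A minimal sketch
of an additional filter file, assuming a repository declared with the
hypothetical id my-thirdparty:

    .mvn/rrf/groupId-my-thirdparty.txt:
    com.example.widgets
    com.example.gadgets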
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-azure-storage-files-sink-sink.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-azure-storage-files-sink-sink.json
new file mode 100644
index 0000000000..2d80946176
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-azure-storage-files-sink-sink.json
@@ -0,0 +1,45 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.azurestoragefilessink.CamelAzurestoragefilessinkSinkConnector",
+        "artifactId": "camel-azure-storage-files-sink-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-azure-storage-files-sink-sink",
+        "type": "sink",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Upload data to Azure Storage Files Share.\n\nIn the header, you can set the `file` \/ `ce-file` property to specify the filename to upload. If you do not set the property in the header, the Kamelet uses the exchange ID as filename."
+    },
+    "properties": {
+        "camel.kamelet.azure-storage-files-sink.accountName": {
+            "name": "camel.kamelet.azure-storage-files-sink.accountName",
+            "description": "The Azure Storage Blob account name.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.azure-storage-files-sink.shareName": {
+            "name": "camel.kamelet.azure-storage-files-sink.shareName",
+            "description": "The Azure Storage File Share share name.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.azure-storage-files-sink.sharedKey": {
+            "name": "camel.kamelet.azure-storage-files-sink.sharedKey",
+            "description": "The Azure Storage Blob access key.",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-sink.credentialType": {
+            "name": "camel.kamelet.azure-storage-files-sink.credentialType",
+            "description": "Determines the credential strategy to adopt.",
+            "defaultValue": "\"SHARED_ACCOUNT_KEY\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-sink.directoryName": {
+            "name": "camel.kamelet.azure-storage-files-sink.directoryName",
+            "description": "The directory from where the producer will upload the file.",
+            "defaultValue": "\".\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
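
For illustration, a minimal Kafka Connect configuration built from the
catalog entry above; the connector name, topic, and credential values are
placeholders:

    {
        "name": "azure-storage-files-sink-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.azurestoragefilessink.CamelAzurestoragefilessinkSinkConnector",
            "topics": "my-topic",
            "camel.kamelet.azure-storage-files-sink.accountName": "myaccount",
            "camel.kamelet.azure-storage-files-sink.shareName": "myshare",
            "camel.kamelet.azure-storage-files-sink.sharedKey": "<access-key>"
        }
    }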
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-azure-storage-files-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-azure-storage-files-source-source.json
new file mode 100644
index 0000000000..1c682ca28a
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-azure-storage-files-source-source.json
@@ -0,0 +1,66 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.azurestoragefilessource.CamelAzurestoragefilessourceSourceConnector",
+        "artifactId": "camel-azure-storage-files-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-azure-storage-files-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Consume files from Azure Storage File Shares."
+    },
+    "properties": {
+        "camel.kamelet.azure-storage-files-source.accountName": {
+            "name": "camel.kamelet.azure-storage-files-source.accountName",
+            "description": "The Azure Storage File Share account name.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.azure-storage-files-source.shareName": {
+            "name": "camel.kamelet.azure-storage-files-source.shareName",
+            "description": "The Azure Storage File Share share name.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.azure-storage-files-source.sharedKey": {
+            "name": "camel.kamelet.azure-storage-files-source.sharedKey",
+            "description": "The Azure Storage Blob access key.",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-source.delay": {
+            "name": "camel.kamelet.azure-storage-files-source.delay",
+            "description": "The number of milliseconds before the next poll of the selected blob.",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-source.deleteAfterRead": {
+            "name": "camel.kamelet.azure-storage-files-source.deleteAfterRead",
+            "description": "Specifies to delete blobs after consuming them",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-source.credentialType": {
+            "name": "camel.kamelet.azure-storage-files-source.credentialType",
+            "description": "Determines the credential strategy to adopt.",
+            "defaultValue": "\"SHARED_ACCOUNT_KEY\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-source.directoryName": {
+            "name": "camel.kamelet.azure-storage-files-source.directoryName",
+            "description": "The directory from where the consumer will start reading files.",
+            "defaultValue": "\".\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.azure-storage-files-source.recursive": {
+            "name": "camel.kamelet.azure-storage-files-source.recursive",
+            "description": "If a directory, the consumer will look for files in all the sub-directories as well.",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
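
Similarly, a minimal source-side sketch with placeholder values; consumed
files land on the configured Kafka topic:

    {
        "name": "azure-storage-files-source-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.azurestoragefilessource.CamelAzurestoragefilessourceSourceConnector",
            "topics": "my-topic",
            "camel.kamelet.azure-storage-files-source.accountName": "myaccount",
            "camel.kamelet.azure-storage-files-source.shareName": "myshare",
            "camel.kamelet.azure-storage-files-source.sharedKey": "<access-key>",
            "camel.kamelet.azure-storage-files-source.deleteAfterRead": "true",
            "camel.kamelet.azure-storage-files-source.recursive": "true"
        }
    }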
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-apicurio-registry-not-secured-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-apicurio-registry-not-secured-source-source.json
new file mode 100644
index 0000000000..f2cf09d16e
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-apicurio-registry-not-secured-source-source.json
@@ -0,0 +1,106 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.kafkabatchapicurioregistrynotsecuredsource.CamelKafkabatchapicurioregistrynotsecuredsourceSourceConnector",
+        "artifactId": "camel-kafka-batch-apicurio-registry-not-secured-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-kafka-batch-apicurio-registry-not-secured-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from Kafka topics in batch on an insecure broker combined with Apicurio Registry and commit them manually through KafkaManualCommit or auto commit."
+    },
+    "properties": {
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.topic": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.topic",
+            "description": "Comma separated list of Kafka topic names",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.bootstrapServers": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.bootstrapServers",
+            "description": "Comma separated list of Kafka Broker URLs",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoCommitEnable": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoCommitEnable",
+            "description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.allowManualCommit": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.allowManualCommit",
+            "description": "Whether to allow doing manual commits",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollOnError": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollOnError",
+            "description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
+            "defaultValue": "\"ERROR_HANDLER\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoOffsetReset": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoOffsetReset",
+            "description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
+            "defaultValue": "\"latest\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.consumerGroup": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.consumerGroup",
+            "description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.deserializeHeaders": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.deserializeHeaders",
+            "description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.valueDeserializer": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.valueDeserializer",
+            "description": "Deserializer class for value that implements the Deserializer interface.",
+            "defaultValue": "\"io.apicurio.registry.serde.avro.AvroKafkaDeserializer\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.apicurioRegistryUrl": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.apicurioRegistryUrl",
+            "description": "The Apicurio Schema Registry URL",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.avroDatumProvider": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.avroDatumProvider",
+            "description": "How to read data with Avro",
+            "defaultValue": "\"io.apicurio.registry.serde.avro.ReflectAvroDatumProvider\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.batchSize": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.batchSize",
+            "description": "The maximum number of records returned in a single call to poll()",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollTimeout": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollTimeout",
+            "description": "The timeout used when polling the KafkaConsumer",
+            "defaultValue": "5000",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.maxPollIntervalMs": {
+            "name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.maxPollIntervalMs",
+            "description": "The maximum delay between invocations of poll() when using consumer group management",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
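
For illustration, a minimal configuration sketch for this batch source; the
broker address, registry URL, and topic names are placeholders. Records are
consumed in batches of up to batchSize and decoded through the Apicurio Avro
deserializer:

    {
        "name": "kafka-batch-apicurio-source-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.kafkabatchapicurioregistrynotsecuredsource.CamelKafkabatchapicurioregistrynotsecuredsourceSourceConnector",
            "topics": "target-topic",
            "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.topic": "source-topic",
            "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.bootstrapServers": "broker:9092",
            "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.apicurioRegistryUrl": "http://registry:8080/apis/registry/v2",
            "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.batchSize": "100"
        }
    }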
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-azure-schema-registry-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-azure-schema-registry-source-source.json
new file mode 100644
index 0000000000..55f254dbe9
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-azure-schema-registry-source-source.json
@@ -0,0 +1,125 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.kafkabatchazureschemaregistrysource.CamelKafkabatchazureschemaregistrysourceSourceConnector",
+        "artifactId": "camel-kafka-batch-azure-schema-registry-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-kafka-batch-azure-schema-registry-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from Kafka topics in batch on Azure Eventhubs combined with Azure Schema Registry and commit them manually through KafkaManualCommit or auto commit."
+    },
+    "properties": {
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.topic": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.topic",
+            "description": "Comma separated list of Kafka topic names",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.bootstrapServers": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.bootstrapServers",
+            "description": "Comma separated list of Kafka Broker URLs",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.securityProtocol": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.securityProtocol",
+            "description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT, SASL_SSL and SSL are supported",
+            "defaultValue": "\"SASL_SSL\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.saslMechanism": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.saslMechanism",
+            "description": "The Simple Authentication and Security Layer (SASL) Mechanism used.",
+            "defaultValue": "\"PLAIN\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.password": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.password",
+            "description": "Password to authenticate to Kafka",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.autoCommitEnable": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.autoCommitEnable",
+            "description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.allowManualCommit": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.allowManualCommit",
+            "description": "Whether to allow doing manual commits",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.pollOnError": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.pollOnError",
+            "description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
+            "defaultValue": "\"ERROR_HANDLER\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.autoOffsetReset": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.autoOffsetReset",
+            "description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
+            "defaultValue": "\"latest\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.consumerGroup": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.consumerGroup",
+            "description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.deserializeHeaders": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.deserializeHeaders",
+            "description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.valueDeserializer": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.valueDeserializer",
+            "description": "Deserializer class for value that implements the Deserializer interface.",
+            "defaultValue": "\"com.microsoft.azure.schemaregistry.kafka.avro.KafkaAvroDeserializer\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.azureRegistryUrl": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.azureRegistryUrl",
+            "description": "The Azure Schema Registry URL",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.specificAvroValueType": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.specificAvroValueType",
+            "description": "The Specific Type Avro will have to deal with Example: com.example.Order",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.batchSize": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.batchSize",
+            "description": "The maximum number of records returned in a single call to poll()",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.pollTimeout": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.pollTimeout",
+            "description": "The timeout used when polling the KafkaConsumer",
+            "defaultValue": "5000",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-azure-schema-registry-source.maxPollIntervalMs": {
+            "name": "camel.kamelet.kafka-batch-azure-schema-registry-source.maxPollIntervalMs",
+            "description": "The maximum delay between invocations of poll() when using consumer group management",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
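
A corresponding sketch for the Azure variant, assuming an Event Hubs
namespace; all values are placeholders, and with Event Hubs the SASL password
is typically the namespace connection string:

    {
        "name": "kafka-batch-azure-schema-registry-source-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.kafkabatchazureschemaregistrysource.CamelKafkabatchazureschemaregistrysourceSourceConnector",
            "topics": "target-topic",
            "camel.kamelet.kafka-batch-azure-schema-registry-source.topic": "source-topic",
            "camel.kamelet.kafka-batch-azure-schema-registry-source.bootstrapServers": "mynamespace.servicebus.windows.net:9093",
            "camel.kamelet.kafka-batch-azure-schema-registry-source.password": "<connection-string>",
            "camel.kamelet.kafka-batch-azure-schema-registry-source.azureRegistryUrl": "https://mynamespace.servicebus.windows.net"
        }
    }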
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-not-secured-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-not-secured-source-source.json
new file mode 100644
index 0000000000..83a2713986
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-not-secured-source-source.json
@@ -0,0 +1,86 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.kafkabatchnotsecuredsource.CamelKafkabatchnotsecuredsourceSourceConnector",
+        "artifactId": "camel-kafka-batch-not-secured-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-kafka-batch-not-secured-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from Kafka topics in batch on an insecure broker and commit them manually through KafkaManualCommit."
+    },
+    "properties": {
+        "camel.kamelet.kafka-batch-not-secured-source.topic": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.topic",
+            "description": "Comma separated list of Kafka topic names",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.bootstrapServers": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.bootstrapServers",
+            "description": "Comma separated list of Kafka Broker URLs",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.autoCommitEnable": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.autoCommitEnable",
+            "description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.allowManualCommit": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.allowManualCommit",
+            "description": "Whether to allow doing manual commits",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.pollOnError": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.pollOnError",
+            "description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
+            "defaultValue": "\"ERROR_HANDLER\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.autoOffsetReset": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.autoOffsetReset",
+            "description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
+            "defaultValue": "\"latest\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.consumerGroup": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.consumerGroup",
+            "description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.deserializeHeaders": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.deserializeHeaders",
+            "description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.batchSize": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.batchSize",
+            "description": "The maximum number of records returned in a single call to poll()",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.pollTimeout": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.pollTimeout",
+            "description": "The timeout used when polling the KafkaConsumer",
+            "defaultValue": "5000",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-not-secured-source.maxPollIntervalMs": {
+            "name": "camel.kamelet.kafka-batch-not-secured-source.maxPollIntervalMs",
+            "description": "The maximum delay between invocations of poll() when using consumer group management",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-scram-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-scram-source-source.json
new file mode 100644
index 0000000000..8a41c66ef5
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-scram-source-source.json
@@ -0,0 +1,112 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.kafkabatchscramsource.CamelKafkabatchscramsourceSourceConnector",
+        "artifactId": "camel-kafka-batch-scram-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-kafka-batch-scram-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from Kafka topics in batch through the SCRAM login module and commit them manually through KafkaManualCommit."
+    },
+    "properties": {
+        "camel.kamelet.kafka-batch-scram-source.topic": {
+            "name": "camel.kamelet.kafka-batch-scram-source.topic",
+            "description": "Comma separated list of Kafka topic names",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-scram-source.bootstrapServers": {
+            "name": "camel.kamelet.kafka-batch-scram-source.bootstrapServers",
+            "description": "Comma separated list of Kafka Broker URLs",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-scram-source.securityProtocol": {
+            "name": "camel.kamelet.kafka-batch-scram-source.securityProtocol",
+            "description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT, SASL_SSL and SSL are supported",
+            "defaultValue": "\"SASL_SSL\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.saslMechanism": {
+            "name": "camel.kamelet.kafka-batch-scram-source.saslMechanism",
+            "description": "The Simple Authentication and Security Layer (SASL) Mechanism used.",
+            "defaultValue": "\"SCRAM-SHA-512\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.user": {
+            "name": "camel.kamelet.kafka-batch-scram-source.user",
+            "description": "Username to authenticate to Kafka",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-scram-source.password": {
+            "name": "camel.kamelet.kafka-batch-scram-source.password",
+            "description": "Password to authenticate to Kafka",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-scram-source.autoCommitEnable": {
+            "name": "camel.kamelet.kafka-batch-scram-source.autoCommitEnable",
+            "description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.allowManualCommit": {
+            "name": "camel.kamelet.kafka-batch-scram-source.allowManualCommit",
+            "description": "Whether to allow doing manual commits",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.pollOnError": {
+            "name": "camel.kamelet.kafka-batch-scram-source.pollOnError",
+            "description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
+            "defaultValue": "\"ERROR_HANDLER\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.autoOffsetReset": {
+            "name": "camel.kamelet.kafka-batch-scram-source.autoOffsetReset",
+            "description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
+            "defaultValue": "\"latest\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.consumerGroup": {
+            "name": "camel.kamelet.kafka-batch-scram-source.consumerGroup",
+            "description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.deserializeHeaders": {
+            "name": "camel.kamelet.kafka-batch-scram-source.deserializeHeaders",
+            "description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.batchSize": {
+            "name": "camel.kamelet.kafka-batch-scram-source.batchSize",
+            "description": "The maximum number of records returned in a single call to poll()",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.pollTimeout": {
+            "name": "camel.kamelet.kafka-batch-scram-source.pollTimeout",
+            "description": "The timeout used when polling the KafkaConsumer",
+            "defaultValue": "5000",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-scram-source.maxPollIntervalMs": {
+            "name": "camel.kamelet.kafka-batch-scram-source.maxPollIntervalMs",
+            "description": "The maximum delay between invocations of poll() when using consumer group management",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-source-source.json
new file mode 100644
index 0000000000..182fbb91ef
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-source-source.json
@@ -0,0 +1,112 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.kafkabatchsource.CamelKafkabatchsourceSourceConnector",
+        "artifactId": "camel-kafka-batch-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-kafka-batch-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from Kafka topics in batch through the Plain Login Module and commit them manually through KafkaManualCommit."
+    },
+    "properties": {
+        "camel.kamelet.kafka-batch-source.topic": {
+            "name": "camel.kamelet.kafka-batch-source.topic",
+            "description": "Comma separated list of Kafka topic names",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-source.bootstrapServers": {
+            "name": "camel.kamelet.kafka-batch-source.bootstrapServers",
+            "description": "Comma separated list of Kafka Broker URLs",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-source.securityProtocol": {
+            "name": "camel.kamelet.kafka-batch-source.securityProtocol",
+            "description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT, SASL_SSL and SSL are supported",
+            "defaultValue": "\"SASL_SSL\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.saslMechanism": {
+            "name": "camel.kamelet.kafka-batch-source.saslMechanism",
+            "description": "The Simple Authentication and Security Layer (SASL) Mechanism used.",
+            "defaultValue": "\"PLAIN\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.user": {
+            "name": "camel.kamelet.kafka-batch-source.user",
+            "description": "Username to authenticate to Kafka",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-source.password": {
+            "name": "camel.kamelet.kafka-batch-source.password",
+            "description": "Password to authenticate to Kafka",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-source.autoCommitEnable": {
+            "name": "camel.kamelet.kafka-batch-source.autoCommitEnable",
+            "description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.allowManualCommit": {
+            "name": "camel.kamelet.kafka-batch-source.allowManualCommit",
+            "description": "Whether to allow doing manual commits",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.pollOnError": {
+            "name": "camel.kamelet.kafka-batch-source.pollOnError",
+            "description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
+            "defaultValue": "\"ERROR_HANDLER\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.autoOffsetReset": {
+            "name": "camel.kamelet.kafka-batch-source.autoOffsetReset",
+            "description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
+            "defaultValue": "\"latest\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.consumerGroup": {
+            "name": "camel.kamelet.kafka-batch-source.consumerGroup",
+            "description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.deserializeHeaders": {
+            "name": "camel.kamelet.kafka-batch-source.deserializeHeaders",
+            "description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.batchSize": {
+            "name": "camel.kamelet.kafka-batch-source.batchSize",
+            "description": "The maximum number of records returned in a single call to poll()",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.pollTimeout": {
+            "name": "camel.kamelet.kafka-batch-source.pollTimeout",
+            "description": "The timeout used when polling the KafkaConsumer",
+            "defaultValue": "5000",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-source.maxPollIntervalMs": {
+            "name": "camel.kamelet.kafka-batch-source.maxPollIntervalMs",
+            "description": "The maximum delay between invocations of poll() when using consumer group management",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-ssl-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-ssl-source-source.json
new file mode 100644
index 0000000000..21c34c54b0
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-kafka-batch-ssl-source-source.json
@@ -0,0 +1,151 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.kafkabatchsslsource.CamelKafkabatchsslsourceSourceConnector",
+        "artifactId": "camel-kafka-batch-ssl-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-kafka-batch-ssl-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from Kafka topics in batch with SSL\/TLS support and commit them manually through KafkaManualCommit or automatically."
+    },
+    "properties": {
+        "camel.kamelet.kafka-batch-ssl-source.topic": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.topic",
+            "description": "Comma separated list of Kafka topic names",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.bootstrapServers": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.bootstrapServers",
+            "description": "Comma separated list of Kafka Broker URLs",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.securityProtocol": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.securityProtocol",
+            "description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT, SASL_SSL and SSL are supported",
+            "defaultValue": "\"SSL\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.saslMechanism": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.saslMechanism",
+            "description": "The Simple Authentication and Security Layer (SASL) Mechanism used.",
+            "defaultValue": "\"GSSAPI\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.autoCommitEnable": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.autoCommitEnable",
+            "description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.allowManualCommit": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.allowManualCommit",
+            "description": "Whether to allow doing manual commits",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.pollOnError": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.pollOnError",
+            "description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
+            "defaultValue": "\"ERROR_HANDLER\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.autoOffsetReset": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.autoOffsetReset",
+            "description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
+            "defaultValue": "\"latest\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.consumerGroup": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.consumerGroup",
+            "description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.deserializeHeaders": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.deserializeHeaders",
+            "description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
+            "defaultValue": "true",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslKeyPassword": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslKeyPassword",
+            "description": "The password of the private key in the key store file.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslKeystorePassword": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslKeystorePassword",
+            "description": "The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured.",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslEndpointAlgorithm": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslEndpointAlgorithm",
+            "description": "The endpoint identification algorithm to validate server hostname using server certificate. Use none or false to disable server hostname verification.",
+            "defaultValue": "\"https\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslProtocol": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslProtocol",
+            "description": "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.",
+            "defaultValue": "\"TLSv1.2\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslKeystoreLocation": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslKeystoreLocation",
+            "description": "The location of the key store file. This is optional for client and can be used for two-way authentication for client.",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslTruststoreLocation": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslTruststoreLocation",
+            "description": "The location of the trust store file.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.sslEnabledProtocols": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.sslEnabledProtocols",
+            "description": "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.",
+            "defaultValue": "\"TLSv1.2,TLSv1.1,TLSv1\"",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.saslJaasConfig": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.saslJaasConfig",
+            "description": "Java Authentication and Authorization Service (JAAS) for Simple Authentication and Security Layer (SASL) configuration.",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.batchSize": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.batchSize",
+            "description": "The maximum number of records returned in a single call to poll()",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.pollTimeout": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.pollTimeout",
+            "description": "The timeout used when polling the KafkaConsumer",
+            "defaultValue": "5000",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.kafka-batch-ssl-source.maxPollIntervalMs": {
+            "name": "camel.kamelet.kafka-batch-ssl-source.maxPollIntervalMs",
+            "description": "The maximum delay between invocations of poll() when using consumer group management",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
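
For the SSL variant, the distinguishing options are the trust store (required)
and the optional key store settings for two-way authentication; a sketch with
placeholder paths and passwords:

    {
        "name": "kafka-batch-ssl-source-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.kafkabatchsslsource.CamelKafkabatchsslsourceSourceConnector",
            "topics": "target-topic",
            "camel.kamelet.kafka-batch-ssl-source.topic": "source-topic",
            "camel.kamelet.kafka-batch-ssl-source.bootstrapServers": "broker:9093",
            "camel.kamelet.kafka-batch-ssl-source.sslTruststoreLocation": "/etc/pki/truststore.jks",
            "camel.kamelet.kafka-batch-ssl-source.sslKeystoreLocation": "/etc/pki/keystore.jks",
            "camel.kamelet.kafka-batch-ssl-source.sslKeystorePassword": "<keystore-password>",
            "camel.kamelet.kafka-batch-ssl-source.sslKeyPassword": "<key-password>"
        }
    }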
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-snowflake-sink-sink.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-snowflake-sink-sink.json
new file mode 100644
index 0000000000..6d07110f03
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-snowflake-sink-sink.json
@@ -0,0 +1,43 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.snowflakesink.CamelSnowflakesinkSinkConnector",
+        "artifactId": "camel-snowflake-sink-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-snowflake-sink-sink",
+        "type": "sink",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Send data to a Snowflake Database.\n\nThis Kamelet expects a JSON-formatted body. Use key:value pairs to map the JSON fields and parameters. For example, here is a query:\n\n'INSERT INTO accounts (username,city) VALUES (:#username,:#city)'\n\nHere is example input for the example query:\n\n'{ \"username\":\"oscerd\", \"city\":\"Rome\"}'"
+    },
+    "properties": {
+        "camel.kamelet.snowflake-sink.instanceUrl": {
+            "name": "camel.kamelet.snowflake-sink.instanceUrl",
+            "description": "The Instance url Example: instance.snowflakecomputing.com",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-sink.username": {
+            "name": "camel.kamelet.snowflake-sink.username",
+            "description": "The username to access a secured Snowflake Database.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-sink.password": {
+            "name": "camel.kamelet.snowflake-sink.password",
+            "description": "The password to access a secured Snowflake Database.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-sink.query": {
+            "name": "camel.kamelet.snowflake-sink.query",
+            "description": "The query to execute against the Snowflake Database. Example: INSERT INTO accounts (username,city) VALUES (:#username,:#city)",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-sink.databaseName": {
+            "name": "camel.kamelet.snowflake-sink.databaseName",
+            "description": "The name of the Snowflake Database.",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
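
Tying the description's worked example together, a sketch of a sink
configuration (instance URL and credentials are placeholders): each consumed
record's JSON fields, here username and city, bind to the :#username and
:#city query parameters:

    {
        "name": "snowflake-sink-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.snowflakesink.CamelSnowflakesinkSinkConnector",
            "topics": "accounts",
            "camel.kamelet.snowflake-sink.instanceUrl": "instance.snowflakecomputing.com",
            "camel.kamelet.snowflake-sink.username": "<user>",
            "camel.kamelet.snowflake-sink.password": "<password>",
            "camel.kamelet.snowflake-sink.query": "INSERT INTO accounts (username,city) VALUES (:#username,:#city)"
        }
    }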
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-snowflake-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-snowflake-source-source.json
new file mode 100644
index 0000000000..9794e4ac39
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-snowflake-source-source.json
@@ -0,0 +1,56 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.snowflakesource.CamelSnowflakesourceSourceConnector",
+        "artifactId": "camel-snowflake-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-snowflake-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Query data from a Snowflake Database."
+    },
+    "properties": {
+        "camel.kamelet.snowflake-source.instanceUrl": {
+            "name": "camel.kamelet.snowflake-source.instanceUrl",
+            "description": "The Instance url Example: instance.snowflakecomputing.com",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-source.username": {
+            "name": "camel.kamelet.snowflake-source.username",
+            "description": "The username to access a secured Snowflake Database.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-source.password": {
+            "name": "camel.kamelet.snowflake-source.password",
+            "description": "The password to access a secured Snowflake Database.",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-source.query": {
+            "name": "camel.kamelet.snowflake-source.query",
+            "description": "The query to execute against the Snowflake Database. Example: INSERT INTO accounts (username,city) VALUES (:#username,:#city)",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.snowflake-source.databaseName": {
+            "name": "camel.kamelet.snowflake-source.databaseName",
+            "description": "The name of the Snowflake Database.",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.snowflake-source.consumedQuery": {
+            "name": "camel.kamelet.snowflake-source.consumedQuery",
+            "description": "A query to run on a tuple consumed. Example: DELETE FROM accounts where user_id = :#user_id",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.snowflake-source.delay": {
+            "name": "camel.kamelet.snowflake-source.delay",
+            "description": "The number of milliseconds before the next poll",
+            "defaultValue": "500",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-spring-rabbitmq-sink-sink.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-spring-rabbitmq-sink-sink.json
new file mode 100644
index 0000000000..aecd5775a0
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-spring-rabbitmq-sink-sink.json
@@ -0,0 +1,55 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.springrabbitmqsink.CamelSpringrabbitmqsinkSinkConnector",
+        "artifactId": "camel-spring-rabbitmq-sink-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-spring-rabbitmq-sink-sink",
+        "type": "sink",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Send data to a RabbitMQ Broker."
+    },
+    "properties": {
+        "camel.kamelet.spring-rabbitmq-sink.host": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.host",
+            "description": "RabbitMQ broker address Example: localhost",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.spring-rabbitmq-sink.port": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.port",
+            "description": "RabbitMQ broker port Example: 5672",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.spring-rabbitmq-sink.routingKey": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.routingKey",
+            "description": "The routing key to use when binding a consumer queue to the exchange",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-sink.username": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.username",
+            "description": "The username to access the RabbitMQ server",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-sink.password": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.password",
+            "description": "The password to access the RabbitMQ server",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-sink.exchangeName": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.exchangeName",
+            "description": "The exchange name determines the exchange the queue will be bound to",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.spring-rabbitmq-sink.queues": {
+            "name": "camel.kamelet.spring-rabbitmq-sink.queues",
+            "description": "The queue to receive messages from",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
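
For illustration, a minimal RabbitMQ sink sketch; the host, exchange, and
routing key values are placeholders (the same property shape applies to the
source connector below):

    {
        "name": "spring-rabbitmq-sink-example",
        "config": {
            "connector.class": "org.apache.camel.kafkaconnector.springrabbitmqsink.CamelSpringrabbitmqsinkSinkConnector",
            "topics": "my-topic",
            "camel.kamelet.spring-rabbitmq-sink.host": "rabbitmq.example.com",
            "camel.kamelet.spring-rabbitmq-sink.port": "5672",
            "camel.kamelet.spring-rabbitmq-sink.exchangeName": "my-exchange",
            "camel.kamelet.spring-rabbitmq-sink.routingKey": "my-key"
        }
    }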
diff --git a/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-spring-rabbitmq-source-source.json b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-spring-rabbitmq-source-source.json
new file mode 100644
index 0000000000..3207def295
--- /dev/null
+++ b/camel-kafka-connector-catalog/src/generated/resources/connectors/camel-spring-rabbitmq-source-source.json
@@ -0,0 +1,62 @@
+{
+    "connector": {
+        "class": "org.apache.camel.kafkaconnector.springrabbitmqsource.CamelSpringrabbitmqsourceSourceConnector",
+        "artifactId": "camel-spring-rabbitmq-source-kafka-connector",
+        "groupId": "org.apache.camel.kafkaconnector",
+        "id": "camel-spring-rabbitmq-source-source",
+        "type": "source",
+        "version": "4.4.2-SNAPSHOT",
+        "description": "Receive data from a RabbitMQ Broker."
+    },
+    "properties": {
+        "camel.kamelet.spring-rabbitmq-source.host": {
+            "name": "camel.kamelet.spring-rabbitmq-source.host",
+            "description": "RabbitMQ broker address Example: localhost",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.spring-rabbitmq-source.port": {
+            "name": "camel.kamelet.spring-rabbitmq-source.port",
+            "description": "RabbitMQ broker port Example: 5672",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.spring-rabbitmq-source.routingKey": {
+            "name": "camel.kamelet.spring-rabbitmq-source.routingKey",
+            "description": "The routing key to use when binding a consumer queue to the exchange",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-source.username": {
+            "name": "camel.kamelet.spring-rabbitmq-source.username",
+            "description": "The username to access the RabbitMQ server",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-source.password": {
+            "name": "camel.kamelet.spring-rabbitmq-source.password",
+            "description": "The password to access the RabbitMQ server",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-source.exchangeName": {
+            "name": "camel.kamelet.spring-rabbitmq-source.exchangeName",
+            "description": "The exchange name determines the exchange the queue will be bound to",
+            "priority": "HIGH",
+            "required": "true"
+        },
+        "camel.kamelet.spring-rabbitmq-source.queues": {
+            "name": "camel.kamelet.spring-rabbitmq-source.queues",
+            "description": "The queue to receive messages from",
+            "priority": "MEDIUM",
+            "required": "false"
+        },
+        "camel.kamelet.spring-rabbitmq-source.autoDeclare": {
+            "name": "camel.kamelet.spring-rabbitmq-source.autoDeclare",
+            "description": "Whether the consumer should auto declare the exchange, queue and binding when starting",
+            "defaultValue": "false",
+            "priority": "MEDIUM",
+            "required": "false"
+        }
+    }
+}
diff --git a/connectors/camel-postgresql-sink-kafka-connector/pom.xml b/connectors/camel-postgresql-sink-kafka-connector/pom.xml
index 19c32bcfec..d2caca7b9e 100644
--- a/connectors/camel-postgresql-sink-kafka-connector/pom.xml
+++ b/connectors/camel-postgresql-sink-kafka-connector/pom.xml
@@ -56,7 +56,7 @@
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
- <version>42.7.2</version>
+ <version>42.7.1</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
diff --git a/connectors/camel-postgresql-source-kafka-connector/pom.xml b/connectors/camel-postgresql-source-kafka-connector/pom.xml
index 013fe5b59f..574bc53049 100644
--- a/connectors/camel-postgresql-source-kafka-connector/pom.xml
+++ b/connectors/camel-postgresql-source-kafka-connector/pom.xml
@@ -56,7 +56,7 @@
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
- <version>42.7.2</version>
+ <version>42.7.1</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>