This is an automated email from the ASF dual-hosted git repository.
dockerzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/inlong-website.git
The following commit(s) were added to refs/heads/master by this push:
new 99bade761b6 [INLONG-1083][Doc] Correct description of connector name for sort-connector Kafka and ElasticSearch (#1084)
99bade761b6 is described below
commit 99bade761b6b38ccc6f576c1b718ec18281717d9
Author: Hengyuan <[email protected]>
AuthorDate: Tue Dec 3 19:44:13 2024 +0800
[INLONG-1083][Doc] Correct description of connector name for sort-connector Kafka and ElasticSearch (#1084)
---
docs/data_node/extract_node/kafka.md | 2 +-
.../current/data_node/extract_node/kafka.md | 2 +-
.../current/data_node/load_node/elasticsearch.md | 2 +-
.../version-2.0.0/data_node/load_node/elasticsearch.md | 2 +-
.../version-2.0.0/data_node/load_node/kafka.md | 2 +-
versioned_docs/version-2.0.0/data_node/extract_node/kafka.md | 2 +-
6 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/docs/data_node/extract_node/kafka.md b/docs/data_node/extract_node/kafka.md
index 6f1f360d92f..68b47c99842 100644
--- a/docs/data_node/extract_node/kafka.md
+++ b/docs/data_node/extract_node/kafka.md
@@ -59,7 +59,7 @@ Flink SQL> CREATE TABLE kafka_extract_node (
-- Read data
Flink SQL> SELECT * FROM kafka_extract_node;
```
-* connector is `upsert-kafka`
+* connector is `upsert-kafka-inlong`
```sql
-- Set checkpoint every 3000 milliseconds
Flink SQL> SET 'execution.checkpointing.interval' = '3s';
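For context, a minimal sketch of what a complete extract table looks like once the connector name is corrected to `upsert-kafka-inlong`; the column list, topic, and broker address below are illustrative placeholders rather than the doc's exact values:

```sql
-- Hypothetical illustration: reading an upsert stream through the InLong sort connector.
-- Columns, topic, and bootstrap servers are placeholders, not copied from the doc.
Flink SQL> CREATE TABLE kafka_extract_node (
              `id` INT,
              `name` STRING,
              PRIMARY KEY (`id`) NOT ENFORCED
           ) WITH (
              'connector' = 'upsert-kafka-inlong',
              'topic' = 'user',
              'properties.bootstrap.servers' = 'localhost:9092',
              'key.format' = 'csv',
              'value.format' = 'csv'
           );

-- Read data from the upsert table.
Flink SQL> SELECT * FROM kafka_extract_node;
```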
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/extract_node/kafka.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/extract_node/kafka.md
index ff61be058ee..48bf45958d6 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/extract_node/kafka.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/extract_node/kafka.md
@@ -57,7 +57,7 @@ Flink SQL> CREATE TABLE kafka_extract_node (
-- 读取数据
Flink SQL> SELECT * FROM kafka_extract_node;
```
-* 连接器是 `upsert-kafka`
+* 连接器是 `upsert-kafka-inlong`
```sql
-- 设置 Checkpoint 为 3000 毫秒
Flink SQL> SET 'execution.checkpointing.interval' = '3s';
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/load_node/elasticsearch.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/load_node/elasticsearch.md
index e01248355c5..fe4d489e3fe 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/load_node/elasticsearch.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/current/data_node/load_node/elasticsearch.md
@@ -58,7 +58,7 @@ CREATE TABLE myUserTable (
pv BIGINT,
PRIMARY KEY (user_id) NOT ENFORCED
) WITH (
- 'connector' = 'elasticsearch-7',
+ 'connector' = 'elasticsearch7-inlong',
'hosts' = 'http://localhost:9200',
'index' = 'users'
);
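For reference, a sketch of the full statement with the corrected connector name `elasticsearch7-inlong`; the columns above `pv` are assumed from the usual Flink Elasticsearch example and may differ slightly from the doc:

```sql
-- Hypothetical illustration of the corrected Elasticsearch 7 sink table.
-- The column list above `pv` is assumed, not copied from the doc.
CREATE TABLE myUserTable (
  user_id STRING,
  user_name STRING,
  uv BIGINT,
  pv BIGINT,
  PRIMARY KEY (user_id) NOT ENFORCED
) WITH (
  'connector' = 'elasticsearch7-inlong',
  'hosts' = 'http://localhost:9200',
  'index' = 'users'
);
```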
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/elasticsearch.md b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/elasticsearch.md
index e01248355c5..fe4d489e3fe 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/elasticsearch.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/elasticsearch.md
@@ -58,7 +58,7 @@ CREATE TABLE myUserTable (
pv BIGINT,
PRIMARY KEY (user_id) NOT ENFORCED
) WITH (
- 'connector' = 'elasticsearch-7',
+ 'connector' = 'elasticsearch7-inlong',
'hosts' = 'http://localhost:9200',
'index' = 'users'
);
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/kafka.md b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/kafka.md
index ba389cb15f6..606d86cb2c8 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/kafka.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.0.0/data_node/load_node/kafka.md
@@ -49,7 +49,7 @@ Flink SQL> CREATE TABLE kafka_load_node (
'format' = 'csv'
)
```
-* 连接器是 `upsert-kafka`
+* 连接器是 `upsert-kafka-inlong`
```sql
-- 使用 Flink SQL 创建 Kafka 表 'kafka_load_node'
Flink SQL> CREATE TABLE kafka_load_node (
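On the load (sink) side the corrected name is used the same way; a minimal usage sketch, with placeholder schema, topic, broker address, and upstream table name:

```sql
-- Hypothetical illustration: writing upsert records through the InLong sort connector.
-- Schema, topic, and bootstrap servers are placeholders, not the doc's exact values.
Flink SQL> CREATE TABLE kafka_load_node (
              `id` INT,
              `name` STRING,
              PRIMARY KEY (`id`) NOT ENFORCED
           ) WITH (
              'connector' = 'upsert-kafka-inlong',
              'topic' = 'user',
              'properties.bootstrap.servers' = 'localhost:9092',
              'key.format' = 'csv',
              'value.format' = 'csv'
           );

-- Write into the sink from an upstream table (name is a placeholder).
Flink SQL> INSERT INTO kafka_load_node SELECT `id`, `name` FROM kafka_extract_node;
```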
diff --git a/versioned_docs/version-2.0.0/data_node/extract_node/kafka.md b/versioned_docs/version-2.0.0/data_node/extract_node/kafka.md
index 6f1f360d92f..68b47c99842 100644
--- a/versioned_docs/version-2.0.0/data_node/extract_node/kafka.md
+++ b/versioned_docs/version-2.0.0/data_node/extract_node/kafka.md
@@ -59,7 +59,7 @@ Flink SQL> CREATE TABLE kafka_extract_node (
-- Read data
Flink SQL> SELECT * FROM kafka_extract_node;
```
-* connector is `upsert-kafka`
+* connector is `upsert-kafka-inlong`
```sql
-- Set checkpoint every 3000 milliseconds
Flink SQL> SET 'execution.checkpointing.interval' = '3s';