This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git

commit e827ff61b4ec842a18814c1a0310816a6cb316b6
Author: Tiewei Fang <[email protected]>
AuthorDate: Tue Feb 6 11:58:25 2024 +0800

    [fix](Docs) Modify the document of data export #30785
---
 docs/en/docs/admin-manual/data-admin/backup.md     | 19 +++++++++++-
 docs/en/docs/data-operate/export/export-manual.md  | 36 ++++++++++++++++++----
 docs/zh-CN/docs/admin-manual/data-admin/backup.md  | 18 ++++++++++-
 .../docs/data-operate/export/export-manual.md      | 35 +++++++++++++++++----
 4 files changed, 94 insertions(+), 14 deletions(-)

diff --git a/docs/en/docs/admin-manual/data-admin/backup.md 
b/docs/en/docs/admin-manual/data-admin/backup.md
index 4edf5860c14..91e0f4dfd25 100644
--- a/docs/en/docs/admin-manual/data-admin/backup.md
+++ b/docs/en/docs/admin-manual/data-admin/backup.md
@@ -58,9 +58,26 @@ ALTER TABLE tbl1 SET ("dynamic_partition.enable"="true")
 
 1. Create a hdfs remote warehouse example_repo:
 
+   **WITH HDFS (Recommended)**
+
+   ```sql
+   CREATE REPOSITORY `example_repo`
+   WITH HDFS
+   ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
+   PROPERTIES
+   (
+   "fs.defaultFS"="hdfs://hdfs_host:port",
+   "hadoop.username" = "hadoop"
+   );
+   ```
+
+   **WITH BROKER**
+
+   This requires starting a broker process first.
+
    ```sql
    CREATE REPOSITORY `example_repo`
-   WITH BROKER `hdfs_broker`
+   WITH BROKER `broker_name`
    ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
    PROPERTIES
    (
diff --git a/docs/en/docs/data-operate/export/export-manual.md 
b/docs/en/docs/data-operate/export/export-manual.md
index 2b030079bce..e6f20aee691 100644
--- a/docs/en/docs/data-operate/export/export-manual.md
+++ b/docs/en/docs/data-operate/export/export-manual.md
@@ -52,6 +52,8 @@ Export's detailed commands can be passed through `HELP 
EXPORT;` in mysql client.
 
 ### Export to HDFS
 
+**WITH HDFS (Recommended)**
+
 ```sql
 EXPORT TABLE db1.tbl1 
 PARTITION (p1,p2)
@@ -62,12 +64,11 @@ PROPERTIES
     "label" = "mylabel",
     "column_separator"=",",
     "columns" = "col1,col2",
-    "parallelusm" = "3"
+    "parallelism" = "3"
 )
-WITH BROKER "hdfs"
-(
-    "username" = "user",
-    "password" = "passwd"
+WITH HDFS (
+"fs.defaultFS"="hdfs://hdfs_host:port",
+"hadoop.username" = "hadoop"
 );
 ```
 
@@ -75,7 +76,30 @@ WITH BROKER "hdfs"
 * `column_separator`: Column separator. The default is `\t`. Supports 
invisible characters, such as'\x07'.
 * `column`: columns to be exported, separated by commas, if this parameter is 
not filled in, all columns of the table will be exported by default.
 * `line_delimiter`: Line separator. The default is `\n`. Supports invisible 
characters, such as'\x07'.
-* `parallelusm`:Exporting with 3 concurrent threads.
+* `parallelism`: The number of concurrent threads used for the export (3 in this example).
+
+**WITH BROKER**
+
+This requires starting a broker process first.
+
+```sql
+EXPORT TABLE db1.tbl1 
+PARTITION (p1,p2)
+[WHERE [expr]]
+TO "hdfs://host/path/to/export/"
+PROPERTIES
+(
+    "label" = "mylabel",
+    "column_separator"=",",
+    "columns" = "col1,col2",
+    "parallelism" = "3"
+)
+WITH BROKER "broker_name" 
+(
+  "username"="xxx",
+  "password"="yyy"
+);
+```
 
 ### Export to Object Storage (Supports S3 Protocol)
 
diff --git a/docs/zh-CN/docs/admin-manual/data-admin/backup.md 
b/docs/zh-CN/docs/admin-manual/data-admin/backup.md
index fd577d57534..e16609f8ef8 100644
--- a/docs/zh-CN/docs/admin-manual/data-admin/backup.md
+++ b/docs/zh-CN/docs/admin-manual/data-admin/backup.md
@@ -58,9 +58,25 @@ Doris 支持将当前数据以文件的形式,通过 broker 备份到远端存
 
 1. 创建一个 hdfs 的远程仓库 example_repo:
 
+   **WITH HDFS(推荐使用)**
    ```sql
    CREATE REPOSITORY `example_repo`
-   WITH BROKER `hdfs_broker`
+   WITH HDFS
+   ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
+   PROPERTIES
+   (
+   "fs.defaultFS"="hdfs://hdfs_host:port",
+   "hadoop.username" = "hadoop"
+   );
+   ```
+
+   **WITH BROKER**
+
+   需要先启动一个BROKER进程。
+   
+   ```sql
+   CREATE REPOSITORY `example_repo`
+   WITH BROKER `broker_name`
    ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
    PROPERTIES
    (
diff --git a/docs/zh-CN/docs/data-operate/export/export-manual.md 
b/docs/zh-CN/docs/data-operate/export/export-manual.md
index b702d1c096e..1781f888518 100644
--- a/docs/zh-CN/docs/data-operate/export/export-manual.md
+++ b/docs/zh-CN/docs/data-operate/export/export-manual.md
@@ -50,6 +50,7 @@ Export 的详细用法可参考 
[EXPORT](../../sql-manual/sql-reference/Data-Man
 
 ### 导出到HDFS
 
+**WITH HDFS(推荐使用)**
 ```sql
 EXPORT TABLE db1.tbl1 
 PARTITION (p1,p2)
@@ -60,12 +61,11 @@ PROPERTIES
     "label" = "mylabel",
     "column_separator"=",",
     "columns" = "col1,col2",
-    "parallelusm" = "3"
+    "parallelism" = "3"
 )
-WITH BROKER "hdfs"
-(
-    "username" = "user",
-    "password" = "passwd"
+WITH HDFS (
+"fs.defaultFS"="hdfs://hdfs_host:port",
+"hadoop.username" = "hadoop"
 );
 ```
 
@@ -73,7 +73,30 @@ WITH BROKER "hdfs"
 * `column_separator`:列分隔符。默认为 `\t`。支持不可见字符,比如 '\x07'。
 * `columns`:要导出的列,使用英文状态逗号隔开,如果不填这个参数默认是导出表的所有列。
 * `line_delimiter`:行分隔符。默认为 `\n`。支持不可见字符,比如 '\x07'。
-* `parallelusm`:并发3个线程去导出。
+* `parallelism`:并发3个线程去导出。
+
+**WITH BROKER**
+
+需要先启动一个BROKER进程。
+
+```sql
+EXPORT TABLE db1.tbl1 
+PARTITION (p1,p2)
+[WHERE [expr]]
+TO "hdfs://host/path/to/export/" 
+PROPERTIES
+(
+    "label" = "mylabel",
+    "column_separator"=",",
+    "columns" = "col1,col2",
+    "parallelism" = "3"
+)
+WITH BROKER "broker_name" 
+(
+  "username"="xxx",
+  "password"="yyy"
+);
+```
 
 ### 导出到对象存储
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to