This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new d784cc0  [Doc] Flink doris connector document modification (#5769)
d784cc0 is described below

commit d784cc06f6c1487e9648c8ec754ebf68733219f9
Author: jiafeng.zhang <[email protected]>
AuthorDate: Wed May 12 10:59:35 2021 +0800

    [Doc] Flink doris connector document modification (#5769)
---
 docs/en/extending-doris/flink-doris-connector.md    | 12 ++++++------
 docs/zh-CN/extending-doris/flink-doris-connector.md | 10 +++++-----
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/docs/en/extending-doris/flink-doris-connector.md 
b/docs/en/extending-doris/flink-doris-connector.md
index cce7aaf..b83aefd 100644
--- a/docs/en/extending-doris/flink-doris-connector.md
+++ b/docs/en/extending-doris/flink-doris-connector.md
@@ -96,7 +96,7 @@ INSERT INTO flink_doris_sink select name,age,price,sale from 
flink_doris_source
  properties.put("table.identifier","db.table");
  env.addSource(new DorisSourceFunction(new DorisStreamOptions(properties),new 
SimpleListDeserializationSchema())).print();
 ```
- 
+
 ### General
 
 | Key                              | Default Value     | Comment               
                                       |
@@ -109,16 +109,16 @@ INSERT INTO flink_doris_sink select name,age,price,sale 
from flink_doris_source
 | doris.request.connect.timeout.ms | 30000             | Connection timeout 
for sending requests to Doris                                |
 | doris.request.read.timeout.ms    | 30000             | Read timeout for 
sending request to Doris                                |
 | doris.request.query.timeout.s    | 3600              | Query the timeout 
time of doris, the default is 1 hour, -1 means no timeout limit             |
-| doris.request.tablet.size        | Integer.MAX_VALUE | The number of Doris 
Tablets corresponding to an RDD Partition. The smaller this value is set, the 
more partitions will be generated. This will increase the parallelism on the 
Spark side, but at the same time will cause greater pressure on Doris. |
-| doris.batch.size                 | 1024              | The maximum number of 
rows to read data from BE at one time. Increasing this value can reduce the 
number of connections between Spark and Doris. Thereby reducing the extra time 
overhead caused by network delay. |
+| doris.request.tablet.size        | Integer.MAX_VALUE | The number of Doris 
Tablets corresponding to a Partition. The smaller this value is set, the more 
partitions will be generated. This will increase the parallelism on the Flink 
side, but at the same time will cause greater pressure on Doris. |
+| doris.batch.size                 | 1024              | The maximum number of 
rows to read data from BE at one time. Increasing this value can reduce the 
number of connections between Flink and Doris, thereby reducing the extra time 
overhead caused by network delay. |
 | doris.exec.mem.limit             | 2147483648        | Memory limit for a 
single query. The default is 2GB, in bytes.                     |
-| doris.deserialize.arrow.async    | false             | Whether to support 
asynchronous conversion of Arrow format to RowBatch required for 
spark-doris-connector iteration                 |
+| doris.deserialize.arrow.async    | false             | Whether to support 
asynchronous conversion of Arrow format to RowBatch required for 
flink-doris-connector iteration           |
 | doris.deserialize.queue.size     | 64                | Asynchronous 
conversion of the internal processing queue in Arrow format takes effect when 
doris.deserialize.arrow.async is true        |
 | doris.read.field            | --            | List of column names in the 
Doris table, separated by commas                  |
 | doris.filter.query          | --            | Filter expression of the 
query, which is transparently transmitted to Doris. Doris uses this expression 
to complete source-side data filtering. |
 | sink.batch.size                        | 100            | Maximum number of 
lines in a single write BE                                             |
 | sink.max-retries                        | 1            | Number of retries 
after writing BE failed                                              |
-  
+
 
 ## Doris & Flink Column Type Mapping
 
@@ -140,4 +140,4 @@ INSERT INTO flink_doris_sink select name,age,price,sale 
from flink_doris_source
 | VARCHAR    | STRING            |
 | DECIMALV2  | DECIMAL                      |
 | TIME       | DOUBLE             |
-| HLL        | Unsupported datatype             |
+| HLL        | Unsupported datatype             |
\ No newline at end of file
diff --git a/docs/zh-CN/extending-doris/flink-doris-connector.md 
b/docs/zh-CN/extending-doris/flink-doris-connector.md
index b3180f6..20ed015 100644
--- a/docs/zh-CN/extending-doris/flink-doris-connector.md
+++ b/docs/zh-CN/extending-doris/flink-doris-connector.md
@@ -96,7 +96,7 @@ INSERT INTO flink_doris_sink select name,age,price,sale from 
flink_doris_source
  properties.put("table.identifier","db.table");
  env.addSource(new DorisSourceFunction(new DorisStreamOptions(properties),new 
SimpleListDeserializationSchema())).print();
 ```
- 
+
 
 ## 配置
 
@@ -112,10 +112,10 @@ INSERT INTO flink_doris_sink select name,age,price,sale 
from flink_doris_source
 | doris.request.connect.timeout.ms | 30000             | 向Doris发送请求的连接超时时间     
                           |
 | doris.request.read.timeout.ms    | 30000             | 向Doris发送请求的读取超时时间     
                           |
 | doris.request.query.timeout.s    | 3600              | 
查询doris的超时时间,默认值为1小时,-1表示无超时限制             |
-| doris.request.tablet.size        | Integer.MAX_VALUE | 一个RDD 
Partition对应的Doris Tablet个数。<br 
/>此数值设置越小,则会生成越多的Partition。从而提升Spark侧的并行度,但同时会对Doris造成更大的压力。 |
-| doris.batch.size                 | 1024              | 
一次从BE读取数据的最大行数。增大此数值可减少Spark与Doris之间建立连接的次数。<br />从而减轻网络延迟所带来的的额外时间开销。 |
+| doris.request.tablet.size        | Integer.MAX_VALUE | 一个Partition对应的Doris 
Tablet个数。<br />此数值设置越小,则会生成越多的Partition。从而提升Flink侧的并行度,但同时会对Doris造成更大的压力。 |
+| doris.batch.size                 | 1024              | 
一次从BE读取数据的最大行数。增大此数值可减少Flink与Doris之间建立连接的次数。<br />从而减轻网络延迟所带来的额外时间开销。 |
 | doris.exec.mem.limit             | 2147483648        | 单个查询的内存限制。默认为 
2GB,单位为字节                      |
-| doris.deserialize.arrow.async    | false             | 
是否支持异步转换Arrow格式到spark-doris-connector迭代所需的RowBatch                 |
+| doris.deserialize.arrow.async    | false             | 
是否支持异步转换Arrow格式到flink-doris-connector迭代所需的RowBatch            |
 | doris.deserialize.queue.size     | 64                | 
异步转换Arrow格式的内部处理队列,当doris.deserialize.arrow.async为true时生效        |
 | doris.read.field            | --            | 读取Doris表的列名列表,多列之间使用逗号分隔       
           |
 | doris.filter.query          | --            | 
过滤读取数据的表达式,此表达式透传给Doris。Doris使用此表达式完成源端数据过滤。 |
@@ -144,4 +144,4 @@ INSERT INTO flink_doris_sink select name,age,price,sale 
from flink_doris_source
 | VARCHAR    | STRING            |
 | DECIMALV2  | DECIMAL                      |
 | TIME       | DOUBLE             |
-| HLL        | Unsupported datatype             |
+| HLL        | Unsupported datatype             |
\ No newline at end of file

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to