This is an automated email from the ASF dual-hosted git repository.
panjuan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/shardingsphere.git
The following commit(s) were added to refs/heads/master by this push:
new cc98db4 Update change history of ShardingSphere-JDBC. (#13981)
cc98db4 is described below
commit cc98db42a96791d4093c235e7f4f16f45e16f9d8
Author: yx9o <[email protected]>
AuthorDate: Tue Dec 7 19:23:57 2021 +0800
Update change history of ShardingSphere-JDBC. (#13981)
---
.../shardingsphere-jdbc/yaml-configuration.cn.md | 478 ++++++++++++---------
.../shardingsphere-jdbc/yaml-configuration.en.md | 373 ++++++++--------
2 files changed, 472 insertions(+), 379 deletions(-)
diff --git
a/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.cn.md
b/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.cn.md
index aa6a254..47ccc88 100644
---
a/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.cn.md
+++
b/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.cn.md
@@ -164,57 +164,70 @@ governance:
#### 配置项说明
```yaml
-dataSources:
- ds0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds0
- username: root
- password:
- ds1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds1
- username: root
- password:
+dataSources: # 数据源配置,可配置多个 data_source_name
+ <data_source_name>: # <!!数据库连接池实现类> `!!`表示实例化该类
+ driverClassName: # 数据库驱动类名
+ url: # 数据库 url 连接
+ username: # 数据库用户名
+ password: # 数据库密码
+ # ... 数据库连接池的其它属性
-shardingRule:
- tables:
- t_order:
- actualDataNodes: ds${0..1}.t_order${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order${order_id % 2}
+shardingRule:
+ tables: # 数据分片规则配置,可配置多个 logic_table_name
+ <logic_table_name>: # 逻辑表名称
+ actualDataNodes: # 由数据源名 + 表名组成,以小数点分隔。多个表以逗号分隔,支持 inline
表达式。缺省表示使用已知数据源与逻辑表名称生成数据节点,用于广播表(即每个库中都需要一个同样的表用于关联查询,多为字典表)或只分库不分表且所有库的表结构完全一致的情况
+
+ databaseStrategy: # 分库策略,缺省表示使用默认分库策略,以下的分片策略只能选其一
+ standard: # 用于单分片键的标准分片场景
+ shardingColumn: # 分片列名称
+ preciseAlgorithmClassName: # 精确分片算法类名称,用于 = 和 IN。该类需实现
PreciseShardingAlgorithm 接口并提供无参数的构造器
+ rangeAlgorithmClassName: # 范围分片算法类名称,用于 BETWEEN,可选。该类需实现
RangeShardingAlgorithm 接口并提供无参数的构造器
+ complex: # 用于多分片键的复合分片场景
+ shardingColumns: # 分片列名称,多个列以逗号分隔
+ algorithmClassName: # 复合分片算法类名称。该类需实现 ComplexKeysShardingAlgorithm
接口并提供无参数的构造器
+ inline: # 行表达式分片策略
+ shardingColumn: # 分片列名称
+ algorithmInlineExpression: # 分片算法行表达式,需符合 groovy 语法
+ hint: # Hint 分片策略
+ algorithmClassName: # Hint 分片算法类名称。该类需实现 HintShardingAlgorithm
接口并提供无参数的构造器
+ none: # 不分片
+ tableStrategy: # 分表策略,同分库策略
keyGenerator:
- type: SNOWFLAKE
- column: order_id
- t_order_item:
- actualDataNodes: ds${0..1}.t_order_item${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order_item${order_id % 2}
- bindingTables:
- - t_order,t_order_item
- broadcastTables:
- - t_config
-
- defaultDataSourceName: ds0
- defaultTableStrategy:
- none:
- defaultKeyGenerator:
- type: SNOWFLAKE
- column: order_id
-
-props:
- sql.show: true
+ column: # 自增列名称,缺省表示不使用自增主键生成器
+ type: # 自增列值生成器类型,缺省表示使用默认自增列值生成器。可使用用户自定义的列值生成器或选择内置类型:SNOWFLAKE/UUID
+ props: # 属性配置, 注意:使用 SNOWFLAKE 算法,需要配置 worker.id 与
max.tolerate.time.difference.milliseconds 属性。若使用此算法生成值作分片值,建议配置
max.vibration.offset 属性
+ <property-name>: # 属性名称
+
+ bindingTables: # 绑定表规则列表
+ - <logic_table_name1, logic_table_name2, ...>
+ - <logic_table_name3, logic_table_name4, ...>
+ - <logic_table_name_x, logic_table_name_y, ...>
+ broadcastTables: # 广播表规则列表
+ - table_name1
+ - table_name2
+ - table_name_x
+
+ defaultDataSourceName: # 未配置分片规则的表将通过默认数据源定位
+ defaultDatabaseStrategy: # 默认数据库分片策略,同分库策略
+ defaultTableStrategy: # 默认表分片策略,同分库策略
+ defaultKeyGenerator: # 默认的主键生成算法 如果没有设置,默认为 SNOWFLAKE 算法
+ type: # 默认自增列值生成器类型,缺省将使用
org.apache.shardingsphere.core.keygen.generator.impl.SnowflakeKeyGenerator。可使用用户自定义的列值生成器或选择内置类型:SNOWFLAKE/UUID
+ props:
+ <property-name>: # 自增列值生成器属性配置, 比如 SNOWFLAKE 算法的 worker.id 与
max.tolerate.time.difference.milliseconds
+
+ masterSlaveRules: # 读写分离规则,详见读写分离部分
+ <data_source_name>: # 数据源名称,需要与真实数据源匹配,可配置多个 data_source_name
+ masterDataSourceName: # 详见读写分离部分
+ slaveDataSourceNames: # 详见读写分离部分
+ loadBalanceAlgorithmType: # 详见读写分离部分
+ props: # 读写分离负载算法的属性配置
+ <property-name>: # 属性值
+
+props: # 属性配置
+ sql.show: # 是否开启 SQL 显示,默认值: false
+ executor.size: # 工作线程数量,默认值: CPU 核数
+ max.connections.size.per.query: # 每个查询可以打开的最大连接数量,默认为 1
+ check.table.metadata.enabled: # 是否在启动时检查分表元数据一致性,默认值: false
```
### 读写分离
@@ -222,30 +235,18 @@ props:
#### 配置项说明
```yaml
-dataSources:
- ds_master: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_master
- username: root
- password:
- ds_slave0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave0
- username: root
- password:
- ds_slave1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave1
- username: root
- password:
+dataSources: # 省略数据源配置,与数据分片一致
masterSlaveRule:
- name: ds_ms
- masterDataSourceName: ds_master
- slaveDataSourceNames: [ds_slave0, ds_slave1]
-
-props:
- sql.show: true
+ name: # 读写分离数据源名称
+ masterDataSourceName: # 主库数据源名称
+ slaveDataSourceNames: # 从库数据源名称列表
+ - <data_source_name1>
+ - <data_source_name2>
+ - <data_source_name_x>
+ loadBalanceAlgorithmType: # 从库负载均衡算法类型,可选值:ROUND_ROBIN,RANDOM。若
`loadBalanceAlgorithmClassName` 存在则忽略该配置
+ props: # 读写分离负载算法的属性配置
+ <property-name>: # 属性值
```
通过 `YamlMasterSlaveDataSourceFactory` 工厂类创建 `DataSource`:
@@ -259,32 +260,22 @@ DataSource dataSource =
YamlMasterSlaveDataSourceFactory.createDataSource(yamlFi
#### 配置项说明
```yaml
-dataSource: !!org.apache.commons.dbcp2.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://127.0.0.1:3306/encrypt?serverTimezone=UTC&useSSL=false
- username: root
- password:
+dataSource: # 省略数据源配置
encryptRule:
encryptors:
- encryptor_aes:
- type: aes
- props:
- aes.key.value: 123456abc
- encryptor_md5:
- type: md5
+ <encryptor-name>:
+ type: # 加解密器类型,可自定义或选择内置类型:MD5/AES
+ props: # 属性配置, 注意:使用 AES 加密器,需要配置 AES 加密器的 KEY 属性:aes.key.value
+ aes.key.value:
tables:
- t_encrypt:
+ <table-name>:
columns:
- user_id:
- plainColumn: user_plain
- cipherColumn: user_cipher
- encryptor: encryptor_aes
- order_id:
- cipherColumn: order_cipher
- encryptor: encryptor_md5
-props:
- query.with.cipher.column: true # 是否使用密文列查询
+ <logic-column-name>:
+ plainColumn: # 存储明文的字段
+ cipherColumn: # 存储密文的字段
+ assistedQueryColumn: # 辅助查询字段,针对 ShardingQueryAssistedEncryptor
类型的加解密器进行辅助查询
+ encryptor: # 加密器名字
```
### 治理
@@ -292,15 +283,23 @@ props:
#### 配置项说明
```yaml
-# 省略数据分片、读写分离和数据脱敏配置
+dataSources: # 省略数据源配置
+shardingRule: # 省略分片规则配置
+masterSlaveRule: # 省略读写分离规则配置
+encryptRule: # 省略数据脱敏规则配置
orchestration:
- name: orchestration_ds
- overwrite: true
- registry:
- type: zookeeper
- namespace: orchestration
- serverLists: localhost:2181
+ name: # 治理实例名称
+ overwrite: # 本地配置是否覆盖注册中心配置。如果可覆盖,每次启动都以本地配置为准
+ registry: # 注册中心配置
+ type: # 配置中心类型。如:zookeeper
+ serverLists: # 连接注册中心服务器的列表。包括 IP 地址和端口号。多个地址用逗号分隔。如: host1:2181,host2:2181
+ namespace: # 注册中心的命名空间
+ digest: # 连接注册中心的权限令牌。缺省为不需要权限验证
+ operationTimeoutMilliseconds: # 操作超时的毫秒数,默认 500 毫秒
+ maxRetries: # 连接失败后的最大重试次数,默认 3 次
+ retryIntervalMilliseconds: # 重试间隔毫秒数,默认 500 毫秒
+ timeToLiveSeconds: # 临时节点存活秒数,默认 60 秒
```
## ShardingSphere-3.x
@@ -310,53 +309,117 @@ orchestration:
#### 配置项说明
```yaml
-dataSources:
- ds0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds0
+# 以下配置截止版本为3.1
+# 配置文件中,必须配置的项目为 schemaName,dataSources,并且
shardingRule,masterSlaveRule,配置其中一个(注意,除非 server.yaml 中定义了
Orchestration,否则必须至少有一个 config-xxxx 配置文件),除此之外的其他项目为可选项
+schemaName: test # schema 名称,每个文件都是单独的 schema,多个 schema 则是多个 yaml 文件,yaml
文件命名要求是 config-xxxx.yaml 格式,虽然没有强制要求,但推荐名称中的 xxxx 与配置的 schemaName 保持一致,方便维护
+
+dataSources: # 配置数据源列表,必须是有效的 jdbc 配置,目前仅支持 MySQL 与
PostgreSQL,另外通过一些未公开(代码中可查,但可能会在未来有变化)的变量,可以配置来兼容其他支持 JDBC
的数据库,但由于没有足够的测试支持,可能会有严重的兼容性问题,配置时候要求至少有一个
+ master_ds_0: # 数据源名称,可以是合法的字符串,目前的校验规则中,没有强制性要求,只要是合法的 yaml
字符串即可,但如果要用于分库分表配置,则需要有有意义的标志(在分库分表配置中详述),以下为目前公开的合法配置项目,不包含内部配置参数
+ # 以下参数为必备参数
+ url:
jdbc:mysql://127.0.0.1:3306/demo_ds_slave_1?serverTimezone=UTC&useSSL=false #
这里的要求合法的 jdbc 连接串即可,目前尚未兼容 MySQL 8.x,需要在 maven 编译时候,升级 MySQL JDBC 版本到 5.1.46 或者
47 版本(不建议升级到 JDBC 的 8.x 系列版本,需要修改源代码,并且无法通过很多测试 case)
+ username: root # MySQL 用户名
+ password: password # MySQL 用户的明文密码
+ # 以下参数为可选参数,给出示例为默认配置,主要用于连接池控制
+ connectionTimeoutMilliseconds: 30000 # 连接超时控制
+ idleTimeoutMilliseconds: 60000 # 连接空闲时间设置
+ maxLifetimeMilliseconds: 0 # 连接的最大持有时间,0 为无限制
+ maxPoolSize: 50 # 连接池中最大维持的连接数量
+ minPoolSize: 1 # 连接池的最小连接数量
+ maintenanceIntervalMilliseconds: 30000 # 连接维护的时间间隔 atomikos 框架需求
+ # 以下配置的假设是,3307 是 3306 的从库,3309,3310 是 3308 的从库
+ slave_ds_0:
+ url:
jdbc:mysql://127.0.0.1:3307/demo_ds_slave_1?serverTimezone=UTC&useSSL=false
username: root
- password:
- ds1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds1
+ password: password
+ master_ds_1:
+ url:
jdbc:mysql://127.0.0.1:3308/demo_ds_slave_1?serverTimezone=UTC&useSSL=false
username: root
- password:
-
-shardingRule:
- tables:
- t_order:
- actualDataNodes: ds${0..1}.t_order${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order${order_id % 2}
- keyGeneratorColumnName: order_id
- t_order_item:
- actualDataNodes: ds${0..1}.t_order_item${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order_item${order_id % 2}
- bindingTables:
- - t_order,t_order_item
- broadcastTables:
- - t_config
-
- defaultDataSourceName: ds0
- defaultTableStrategy:
- none:
- defaultKeyGeneratorClassName:
io.shardingsphere.core.keygen.DefaultKeyGenerator
-
+ password: password
+ slave_ds_1:
+ url:
jdbc:mysql://127.0.0.1:3309/demo_ds_slave_1?serverTimezone=UTC&useSSL=false
+ username: root
+ password: password
+ slave_ds_1_slave2:
+ url:
jdbc:mysql://127.0.0.1:3310/demo_ds_slave_1?serverTimezone=UTC&useSSL=false
+ username: root
+ password: password
+masterSlaveRule: # 这里配置这个规则的话,相当于是全局读写分离配置
+ name: ds_rw #
名称,合法的字符串即可,但如果涉及到在读写分离的基础上设置分库分表,则名称需要有意义才可以,另外,虽然目前没有强制要求,但主从库配置需要配置在实际关联的主从库上,如果配置的数据源之间主从是断开的状态,那么可能会发生写入的数据对于只读会话无法读取到的问题
+ # 如果一个会话发生了写入并且没有提交(显式打开事务),sharding sphere 在后续的路由中,select 都会在主库执行,直到会话提交
+ masterDataSourceName: master_ds_0 # 主库的 DataSource 名称
+ slaveDataSourceNames: # 从库的 DataSource 列表,至少需要有一个
+ - slave_ds_0
+ loadBalanceAlgorithmClassName: io.shardingsphere.api.algorithm.masterslave #
MasterSlaveLoadBalanceAlgorithm 接口的实现类,允许自定义实现 默认提供两个,配置路径为
io.shardingsphere.api.algorithm.masterslave 下的
RandomMasterSlaveLoadBalanceAlgorithm(随机 Random)与
RoundRobinMasterSlaveLoadBalanceAlgorithm(轮询:次数 % 从库数量)
+ loadBalanceAlgorithmType: # 从库负载均衡算法类型,可选值:ROUND_ROBIN,RANDOM。若
loadBalanceAlgorithmClassName 存在则忽略该配置,默认为 ROUND_ROBIN
+
+shardingRule: # sharding 的配置
+ # 配置主要分两类,一类是对整个 sharding 规则所有表生效的默认配置,一个是 sharding 具体某张表时候的配置
+ # 首先说默认配置
+ masterSlaveRules: # 在 shardingRule 中也可以配置 shardingRule,对分片生效,具体内容与全局
masterSlaveRule 一致,但语法为:
+ master_test_0:
+ masterDataSourceName: master_ds_0
+ slaveDataSourceNames:
+ - slave_ds_0
+ master_test_1:
+ masterDataSourceName: master_ds_1
+ slaveDataSourceNames:
+ - slave_ds_1
+ - slave_ds_1_slave2
+ defaultDataSourceName: master_test_0 # 这里的数据源允许是 dataSources 的配置项目或者
masterSlaveRules 配置的名称,配置为 masterSlaveRule 的话相当于就是配置读写分离了
+ broadcastTables: # 广播表 这里配置的表列表,对于发生的所有数据变更,都会不经 sharding
处理,而是直接发送到所有数据节点,注意此处为列表,每个项目为一个表名称
+ - broad_1
+ - broad_2
+ bindingTables: # 绑定表,也就是实际上哪些配置的 sharding 表规则需要实际生效的列表,配置为 yaml
列表,并且允许单个条目中以逗号切割,所配置表必须已经配置为逻辑表
+ - sharding_t1
+ - sharding_t2,sharding_t3
+ defaultDatabaseShardingStrategy: # 默认库级别 sharding 规则,对应代码中 ShardingStrategy
接口的实现类,目前支持 none,inline,hint,complex,standard 五种配置 注意此处默认配置仅可以配置五个中的一个
+ # 规则配置同样适合表 sharding 配置,同样是在这些算法中选择
+ none: # 不配置任何规则,SQL 会被发给所有节点去执行,这个规则没有子项目可以配置
+ inline: # 行表达式分片
+ shardingColumn: test_id # 分片列名称
+ algorithmExpression: master_test_${test_id % 2} #
分片表达式,根据指定的表达式计算得到需要路由到的数据源名称 需要是合法的 groovy 表达式,示例配置中,取余为 0 则语句路由到
master_test_0,取余为 1 则路由到 master_test_1
+ hint: # 基于标记的 sharding 分片
+ shardingAlgorithm: # 需要是 HintShardingAlgorithm 接口的实现,目前代码中,仅有为测试目的实现的
OrderDatabaseHintShardingAlgorithm,没有生产环境可用的实现
+ complex: # 支持多列的 sharding,目前无生产可用实现
+ shardingColumns: # 逗号切割的列
+ shardingAlgorithm: # ComplexKeysShardingAlgorithm 接口的实现类
+ standard: # 单列 sharding 算法,需要配合对应的
preciseShardingAlgorithm,rangeShardingAlgorithm 接口的实现使用,目前无生产可用实现
+ shardingColumn: # 列名,允许单列
+ preciseShardingAlgorithm: # preciseShardingAlgorithm 接口的实现类
+ rangeShardingAlgorithm: # rangeShardingAlgorithm 接口的实现类
+ defaultTableStrategy: # 配置参考 defaultDatabaseShardingStrategy,区别在于,inline
算法的配置中,algorithmExpression 的配置算法结果需要是实际的物理表名称,而非数据源名称
+ defaultKeyGenerator: # 默认的主键生成算法 如果没有设置,默认为 SNOWFLAKE 算法
+ column: # 自增键对应的列名称
+ type: # 自增键的类型,主要用于调用内置的主键生成算法有三个可用值:SNOWFLAKE(时间戳 +worker id+ 自增
id),UUID(java.util.UUID 类生成的随机 UUID),LEAF,其中 Snowflake 算法与 UUID 算法已经实现,LEAF
目前(2018-01-14)尚未实现
+ className: # 非内置的其他实现了 KeyGenerator 接口的类,需要注意,如果设置这个,就不能设置 type,否则 type
的设置会覆盖 class 的设置
+ props:
+ # 定制算法需要设置的参数,比如 SNOWFLAKE 算法的 worker.id 与
max.tolerate.time.difference.milliseconds
+ tables: # 配置表 sharding 的主要位置
+ sharding_t1:
+ actualDataNodes: master_test_${0..1}.t_order${0..1} # sharding
表对应的数据源以及物理名称,需要用表达式处理,表示表实际上在哪些数据源存在,配置示例中,意思是总共存在 4 个分片
master_test_0.t_order0,master_test_0.t_order1,master_test_1.t_order0,master_test_1.t_order1
+ # 需要注意的是,必须保证设置 databaseStrategy 可以路由到唯一的 dataSource,tableStrategy 可以路由到
dataSource 中唯一的物理表上,否则可能导致错误:一个 insert 语句被插入到多个实际物理表中
+ databaseStrategy: # 局部设置会覆盖全局设置,参考 defaultDatabaseShardingStrategy
+ tableStrategy: # 局部设置会覆盖全局设置,参考 defaultTableStrategy
+ keyGenerator: # 局部设置会覆盖全局设置,参考 defaultKeyGenerator
+ logicIndex: # 逻辑索引名称 由于 Oracle,PG 这种数据库中,索引与表共用命名空间,如果接受到 drop index
语句,执行之前,会通过这个名称配置的确定对应的实际物理表名称
props:
- sql.show: true
+ sql.show: # 是否开启 SQL 显示,默认值: false
+ acceptor.size: # accept 连接的线程数量,默认为 cpu 核数 2 倍
+ executor.size: # 工作线程数量最大,默认值: 无限制
+ max.connections.size.per.query: # 每个查询可以打开的最大连接数量,默认为 1
+ proxy.frontend.flush.threshold: # proxy 的服务时候,对于单个大查询,每多少个网络包返回一次
+ check.table.metadata.enabled: # 是否在启动时检查分表元数据一致性,默认值: false
+ proxy.transaction.type: # 默认 LOCAL,proxy 的事务模型 允许 LOCAL,XA,BASE 三个值 LOCAL
无分布式事务,XA 则是采用 atomikos 实现的分布式事务 BASE 目前尚未实现
+ proxy.opentracing.enabled: # 是否启用 opentracing
+ proxy.backend.use.nio: # 是否采用 netty 的 NIO 机制连接后端数据库,默认 False ,使用 epoll 机制
+ proxy.backend.max.connections: # 使用 NIO 而非 epoll 的话,proxy 后台连接每个 netty
客户端允许的最大连接数量(注意不是数据库连接限制) 默认为 8
+ proxy.backend.connection.timeout.seconds: # 使用 nio 而非 epoll 的话,proxy
后台连接的超时时间,默认 60s
+ check.table.metadata.enabled: # 是否在启动时候,检查 sharding 的表的实际元数据是否一致,默认 False
+
+configMap: # 用户自定义配置
+ key1: value1
+ key2: value2
+ keyx: valuex
```
### 读写分离
@@ -364,31 +427,27 @@ props:
#### 配置项说明
```yaml
-dataSources:
- ds_master: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_master
- username: root
- password:
- ds_slave0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave0
- username: root
- password:
- ds_slave1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave1
- username: root
- password:
+dataSources: # 省略数据源配置,与数据分片一致
masterSlaveRule:
- name: ds_ms
- masterDataSourceName: ds_master
- slaveDataSourceNames: [ds_slave0, ds_slave1]
- props:
- sql.show: true
- configMap:
- key1: value1
+ name: # 读写分离数据源名称
+ masterDataSourceName: # 主库数据源名称
+ slaveDataSourceNames: # 从库数据源名称列表
+ - <data_source_name1>
+ - <data_source_name2>
+ - <data_source_name_x>
+ loadBalanceAlgorithmClassName: # 从库负载均衡算法类名称。该类需实现
MasterSlaveLoadBalanceAlgorithm 接口且提供无参数构造器
+ loadBalanceAlgorithmType: # 从库负载均衡算法类型,可选值:ROUND_ROBIN,RANDOM。若
`loadBalanceAlgorithmClassName` 存在则忽略该配置
+
+props: # 属性配置
+ sql.show: # 是否开启 SQL 显示,默认值: false
+ executor.size: # 工作线程数量,默认值: CPU 核数
+ check.table.metadata.enabled: # 是否在启动时检查分表元数据一致性,默认值: false
+
+configMap: # 用户自定义配置
+ key1: value1
+ key2: value2
+ keyx: valuex
```
通过 `MasterSlaveDataSourceFactory` 工厂类创建 `DataSource`:
@@ -402,14 +461,21 @@ DataSource dataSource =
MasterSlaveDataSourceFactory.createDataSource(yamlFile);
#### 配置项说明
```yaml
-# 省略数据分片和读写分离配置
+dataSources: # 省略数据源配置
+shardingRule: # 省略分片规则配置
+masterSlaveRule: # 省略读写分离规则配置
orchestration:
- name: orchestration_ds
- overwrite: true
- registry:
- namespace: orchestration
- serverLists: localhost:2181
+ name: # 数据治理实例名称
+ overwrite: # 本地配置是否覆盖注册中心配置。如果可覆盖,每次启动都以本地配置为准
+ registry: # 注册中心配置
+ serverLists: # 连接注册中心服务器的列表。包括 IP 地址和端口号。多个地址用逗号分隔。如: host1:2181,host2:2181
+ namespace: # 注册中心的命名空间
+ digest: # 连接注册中心的权限令牌。缺省为不需要权限验证
+ operationTimeoutMilliseconds: # 操作超时的毫秒数,默认 500 毫秒
+ maxRetries: # 连接失败后的最大重试次数,默认 3 次
+ retryIntervalMilliseconds: # 重试间隔毫秒数,默认 500 毫秒
+ timeToLiveSeconds: # 临时节点存活秒数,默认 60 秒
```
## ShardingSphere-2.x
@@ -482,10 +548,10 @@ shardingRule:
#### 支持项
1. 提供了一主多从的读写分离配置,可独立使用,也可配合分库分表使用。
-2. 独立使用读写分离支持SQL透传。
+2. 独立使用读写分离支持 SQL 透传。
3. 同一线程且同一数据库连接内,如有写入操作,以后的读操作均从主库读取,用于保证数据一致性。
-4. Spring命名空间。
-5. 基于Hint的强制主库路由。
+4. Spring 命名空间。
+5. 基于 Hint 的强制主库路由。
#### 不支持范围
@@ -534,43 +600,43 @@ DataSource dataSource =
MasterSlaveDataSourceFactory.createDataSource(yamlFile);
#### 配置项说明
-Zookeeper分库分表编排配置项说明
+Zookeeper 分库分表编排配置项说明
```yaml
-dataSources: 数据源配置
-
-shardingRule: 分片规则配置
-
-orchestration: Zookeeper编排配置
- name: 编排服务节点名称
- overwrite: 本地配置是否可覆盖注册中心配置。如果可覆盖,每次启动都以本地配置为准
- zookeeper: Zookeeper注册中心配置
- namespace: Zookeeper的命名空间
- serverLists: 连接Zookeeper服务器的列表。包括IP地址和端口号。多个地址用逗号分隔。如:
host1:2181,host2:2181
- baseSleepTimeMilliseconds: 等待重试的间隔时间的初始值。单位:毫秒
- maxSleepTimeMilliseconds: 等待重试的间隔时间的最大值。单位:毫秒
- maxRetries: 最大重试次数
- sessionTimeoutMilliseconds: 会话超时时间。单位:毫秒
- connectionTimeoutMilliseconds: 连接超时时间。单位:毫秒
- digest: 连接Zookeeper的权限令牌。缺省为不需要权限验证
+dataSources: # 数据源配置
+
+shardingRule: # 分片规则配置
+
+orchestration: # Zookeeper 编排配置
+ name: # 编排服务节点名称
+ overwrite: # 本地配置是否可覆盖注册中心配置。如果可覆盖,每次启动都以本地配置为准
+ zookeeper: # Zookeeper 注册中心配置
+ namespace: # Zookeeper 的命名空间
+ serverLists: # 连接 Zookeeper 服务器的列表。包括 IP 地址和端口号。多个地址用逗号分隔。如:
host1:2181,host2:2181
+ baseSleepTimeMilliseconds: # 等待重试的间隔时间的初始值。单位:毫秒
+ maxSleepTimeMilliseconds: # 等待重试的间隔时间的最大值。单位:毫秒
+ maxRetries: # 最大重试次数
+ sessionTimeoutMilliseconds: # 会话超时时间。单位:毫秒
+ connectionTimeoutMilliseconds: # 连接超时时间。单位:毫秒
+ digest: # 连接 Zookeeper 的权限令牌。缺省为不需要权限验证
```
-Etcd分库分表编排配置项说明
+Etcd 分库分表编排配置项说明
```yaml
-dataSources: 数据源配置
-
-shardingRule: 分片规则配置
-
-orchestration: Etcd编排配置
- name: 编排服务节点名称
- overwrite: 本地配置是否可覆盖注册中心配置。如果可覆盖,每次启动都以本地配置为准
- etcd: Etcd注册中心配置
- serverLists: 连接Etcd服务器的列表。包括IP地址和端口号。多个地址用逗号分隔。如:
http://host1:2379,http://host2:2379
- timeToLiveSeconds: 临时节点存活时间。单位:秒
- timeoutMilliseconds: 每次请求的超时时间。单位:毫秒
- maxRetries: 每次请求的最大重试次数
- retryIntervalMilliseconds: 重试间隔时间。单位:毫秒
+dataSources: # 数据源配置
+
+shardingRule: # 分片规则配置
+
+orchestration: # Etcd 编排配置
+ name: # 编排服务节点名称
+ overwrite: # 本地配置是否可覆盖注册中心配置。如果可覆盖,每次启动都以本地配置为准
+ etcd: # Etcd 注册中心配置
+ serverLists: # 连接 Etcd 服务器的列表。包括 IP 地址和端口号。多个地址用逗号分隔。如:
http://host1:2379,http://host2:2379
+ timeToLiveSeconds: # 临时节点存活时间。单位:秒
+ timeoutMilliseconds: # 每次请求的超时时间。单位:毫秒
+ maxRetries: # 每次请求的最大重试次数
+ retryIntervalMilliseconds: # 重试间隔时间。单位:毫秒
```
分库分表编排数据源构建方式
diff --git
a/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.en.md
b/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.en.md
index 141d90d..c8c3d88 100644
---
a/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.en.md
+++
b/docs/document/content/reference/api-change-history/shardingsphere-jdbc/yaml-configuration.en.md
@@ -164,57 +164,69 @@ governance:
#### Configuration Item Explanation
```yaml
-dataSources:
- ds0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds0
- username: root
- password:
- ds1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds1
- username: root
- password:
+dataSources: # Data sources configuration, multiple `data_source_name`
available
+ <data_source_name>: # <!!Data source pool implementation class> `!!` means
class instantiation
+ driverClassName: # Class name of database driver
+ url: # Database URL
+ username: # Database username
+ password: # Database password
+ # ... Other properties for data source pool
-shardingRule:
- tables:
- t_order:
- actualDataNodes: ds${0..1}.t_order${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order${order_id % 2}
+shardingRule:
+ tables: # Sharding rule configuration, multiple `logic_table_name` available
+ <logic_table_name>: # Name of logic table
+ actualDataNodes: # Describe data source names and actual tables,
delimiter as point, multiple data nodes separated with comma, support inline
expression. Absent means sharding databases only. Example: ds${0..7}.tbl${0..7}
+
+ databaseStrategy: # Databases sharding strategy, use default databases
sharding strategy if absent. sharding strategy below can choose only one
+ standard: # Standard sharding scenario for single sharding column
+ shardingColumn: # Name of sharding column
+ preciseAlgorithmClassName: # Precise algorithm class name used for
`=` and `IN`. This class need to implements PreciseShardingAlgorithm, and
require a no argument constructor
+ rangeAlgorithmClassName: # Range algorithm class name used for
`BETWEEN`. This class need to implements RangeShardingAlgorithm, and require a
no argument constructor
+ complex: # Complex sharding scenario for multiple sharding columns
+ shardingColumns: # Names of sharding columns. Multiple columns
separated with comma
+ algorithmClassName: # Complex sharding algorithm class name. This
class need to implements ComplexKeysShardingAlgorithm, and require a no
argument constructor
+ inline: # Inline expression sharding scenario for single sharding
column
+ shardingColumn: # Name of sharding column
+ algorithmInlineExpression: # Inline expression for sharding
algorithm
+ hint: # Hint sharding strategy
+ algorithmClassName: # Hint sharding algorithm class name. This
class need to implements HintShardingAlgorithm, and require a no argument
constructor
+ none: # Do not sharding
+ tableStrategy: # Tables sharding strategy, Same as databases sharding
strategy
keyGenerator:
- type: SNOWFLAKE
- column: order_id
- t_order_item:
- actualDataNodes: ds${0..1}.t_order_item${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order_item${order_id % 2}
- bindingTables:
- - t_order,t_order_item
- broadcastTables:
- - t_config
-
- defaultDataSourceName: ds0
- defaultTableStrategy:
- none:
+ column: # Column name of key generator
+ type: # Type of key generator, use default key generator if absent,
and there are three types to choose, that is, SNOWFLAKE/UUID
+ props: # Properties, Notice: when use SNOWFLAKE, `worker.id` and
`max.tolerate.time.difference.milliseconds` for `SNOWFLAKE` need to be set. To
use the generated value of this algorithm as sharding value, it is recommended
to configure `max.vibration.offset`
+
+ bindingTables: # Binding table rule configurations
+ - <logic_table_name1, logic_table_name2, ...>
+ - <logic_table_name3, logic_table_name4, ...>
+ - <logic_table_name_x, logic_table_name_y, ...>
+ broadcastTables: # Broadcast table rule configurations
+ - table_name1
+ - table_name2
+ - table_name_x
+
+ defaultDataSourceName: # If table not configure at table rule, will route to
defaultDataSourceName
+ defaultDatabaseStrategy: # Default strategy for sharding databases, same as
databases sharding strategy
+ defaultTableStrategy: # Default strategy for sharding tables, same as tables
sharding strategy
defaultKeyGenerator:
- type: SNOWFLAKE
- column: order_id
-
-props:
- sql.show: true
+ type: # Type of default key generator, use user-defined ones or built-in
ones, e.g. SNOWFLAKE, UUID. Default key generator is
`org.apache.shardingsphere.core.keygen.generator.impl.SnowflakeKeyGenerator`
+ column: # Column name of default key generator
+ props: # Properties of default key generator, e.g. `worker.id` and
`max.tolerate.time.difference.milliseconds` for `SNOWFLAKE`
+
+ masterSlaveRules: # Read-write splitting rule configuration, more details
can reference Read-write splitting part
+ <data_source_name>: # Data sources configuration, need consist with data
source map, multiple `data_source_name` available
+ masterDataSourceName: # more details can reference Read-write splitting
part
+ slaveDataSourceNames: # more details can reference Read-write splitting
part
+ loadBalanceAlgorithmType: # more details can reference Read-write
splitting part
+ props: # Properties configuration of load balance algorithm
+ <property-name>: # property key value pair
+
+props: # Properties
+ sql.show: # To show SQLS or not, default value: false
+ executor.size: # The number of working threads, default value: CPU count
+ check.table.metadata.enabled: # To check the metadata consistency of all the
tables or not, default value : false
+ max.connections.size.per.query: # The maximum connection number allocated by
each query of each physical database. default value: 1
```
### Read-Write Split
@@ -222,30 +234,24 @@ props:
#### Configuration Item Explanation
```yaml
-dataSources:
- ds_master: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_master
- username: root
- password:
- ds_slave0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave0
- username: root
- password:
- ds_slave1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave1
- username: root
- password:
+dataSources: # Omit data source configurations; keep it consistent with data
sharding
masterSlaveRule:
- name: ds_ms
- masterDataSourceName: ds_master
- slaveDataSourceNames: [ds_slave0, ds_slave1]
-
-props:
- sql.show: true
+ name: # Read-write split data source name
+ masterDataSourceName: # Master data source name
+ slaveDataSourceNames: # Slave data source name
+ - <data_source_name1>
+ - <data_source_name2>
+ - <data_source_name_x>
+ loadBalanceAlgorithmType: # Slave database load balance algorithm type;
optional value, ROUND_ROBIN and RANDOM, can be omitted if
`loadBalanceAlgorithmClassName` exists
+ props: # Properties configuration of load balance algorithm
+ <property-name>: # property key value pair
+
+props: # Property configuration
+ sql.show: # Show SQL or not; default value: false
+ executor.size: # Executing thread number; default value: CPU core number
+ check.table.metadata.enabled: # Whether to check table metadata consistency
when it initializes; default value: false
+ max.connections.size.per.query: # The maximum connection number allocated by
each query of each physical database. default value: 1
```
Create a `DataSource` through the `YamlMasterSlaveDataSourceFactory` factory
class:
@@ -259,32 +265,22 @@ DataSource dataSource =
YamlMasterSlaveDataSourceFactory.createDataSource(yamlFi
#### Configuration Item Explanation
```yaml
-dataSource: !!org.apache.commons.dbcp2.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://127.0.0.1:3306/encrypt?serverTimezone=UTC&useSSL=false
- username: root
- password:
+dataSource: # Ignore data sources configuration
encryptRule:
encryptors:
- encryptor_aes:
- type: aes
- props:
- aes.key.value: 123456abc
- encryptor_md5:
- type: md5
+ <encryptor-name>:
+ type: # Encryptor type
+ props: # Properties, e.g. `aes.key.value` for AES encryptor
+ aes.key.value:
tables:
- t_encrypt:
+ <table-name>:
columns:
- user_id:
- plainColumn: user_plain
- cipherColumn: user_cipher
- encryptor: encryptor_aes
- order_id:
- cipherColumn: order_cipher
- encryptor: encryptor_md5
-props:
- query.with.cipher.column: true # use ciphertext column query
+ <logic-column-name>:
+ plainColumn: # Plaintext column name
+ cipherColumn: # Ciphertext column name
+ assistedQueryColumn: # AssistedColumns for query,when use
ShardingQueryAssistedEncryptor, it can help query encrypted data
+ encryptor: # Encrypt name
```
### Orchestration
@@ -292,15 +288,23 @@ props:
#### Configuration Item Explanation
```yaml
-# Omit data sharding, Read-Write split, and Data masking configuration.
+dataSources: # Omit data source configurations
+shardingRule: # Omit sharding rule configurations
+masterSlaveRule: # Omit read-write split rule configurations
+encryptRule: # Omit encrypt rule configurations
orchestration:
- name: orchestration_ds
- overwrite: true
- registry:
- type: zookeeper
- namespace: orchestration
- serverLists: localhost:2181
+ name: # Orchestration instance name
+ overwrite: # Whether to overwrite local configurations with registry center
configurations; if it can, each initialization should refer to local
configurations
+ registry: # Registry center configuration
+ type: # Registry center type. Example:zookeeper
+ serverLists: # The list of servers that connect to registry center,
including IP and port number; use commas to seperate addresses, such as:
host1:2181,host2:2181
+ namespace: # Registry center namespace
+ digest: # The token that connects to the registry center; default means
there is no need for authentication
+ operationTimeoutMilliseconds: # Default value: 500 milliseconds
+ maxRetries: # Maximum retry time after failing; default value: 3 times
+ retryIntervalMilliseconds: # Interval time to retry; default value: 500
milliseconds
+ timeToLiveSeconds: # Living time of temporary nodes; default value: 60
seconds
```
## ShardingSphere-3.x
@@ -310,53 +314,73 @@ orchestration:
#### Configuration Item Explanation
```yaml
-dataSources:
- ds0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds0
- username: root
- password:
- ds1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds1
- username: root
- password:
+dataSources: # Data sources configuration, multiple `data_source_name`
available
+ <data_source_name>: # <!!Data source pool implementation class> `!!` means
class instantiation
+ driverClassName: # Class name of database driver
+ url: # Database URL
+ username: # Database username
+ password: # Database password
+ # ... Other properties for data source pool
-shardingRule:
- tables:
- t_order:
- actualDataNodes: ds${0..1}.t_order${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order${order_id % 2}
- keyGeneratorColumnName: order_id
- t_order_item:
- actualDataNodes: ds${0..1}.t_order_item${0..1}
- databaseStrategy:
- inline:
- shardingColumn: user_id
- algorithmExpression: ds${user_id % 2}
- tableStrategy:
- inline:
- shardingColumn: order_id
- algorithmExpression: t_order_item${order_id % 2}
- bindingTables:
- - t_order,t_order_item
- broadcastTables:
- - t_config
-
- defaultDataSourceName: ds0
- defaultTableStrategy:
- none:
- defaultKeyGeneratorClassName:
io.shardingsphere.core.keygen.DefaultKeyGenerator
-
-props:
- sql.show: true
+shardingRule:
+ tables: # Sharding rule configuration, multiple `logic_table_name` available
+ <logic_table_name>: # Name of logic table
+ actualDataNodes: # Describe data source names and actual tables,
delimiter as point, multiple data nodes separated with comma, support inline
expression. Absent means sharding databases only. Example: ds${0..7}.tbl${0..7}
+
+ databaseStrategy: # Databases sharding strategy, use default databases
sharding strategy if absent. sharding strategy below can choose only one
+ standard: # Standard sharding scenario for single sharding column
+ shardingColumn: # Name of sharding column
+ preciseAlgorithmClassName: # Precise algorithm class name used for
`=` and `IN`. This class needs to implement PreciseShardingAlgorithm and
requires a no-argument constructor
+ rangeAlgorithmClassName: # Range algorithm class name used for
`BETWEEN`. This class needs to implement RangeShardingAlgorithm and requires a
no-argument constructor
+ complex: # Complex sharding scenario for multiple sharding columns
+ shardingColumns: # Names of sharding columns. Multiple columns
separated with comma
+ algorithmClassName: # Complex sharding algorithm class name. This
class needs to implement ComplexKeysShardingAlgorithm and requires a no-argument
constructor
+ inline: # Inline expression sharding scenario for single sharding
column
+ shardingColumn: # Name of sharding column
+ algorithmInlineExpression: # Inline expression for sharding
algorithm
+ hint: # Hint sharding strategy
+ algorithmClassName: # Hint sharding algorithm class name. This
class needs to implement HintShardingAlgorithm and requires a no-argument
constructor
+ none: # Do not sharding
+ tableStrategy: # Tables sharding strategy, Same as databases sharding
strategy
+
+ keyGeneratorColumnName: # Column name of key generator, do not use Key
generator if absent
+ keyGeneratorClassName: # Key generator, use default key generator if
absent. This class needs to implement KeyGenerator and requires a no-argument
constructor
+
+ logicIndex: # Name of logic index. If using `DROP INDEX XXX` SQL in
Oracle/PostgreSQL, this property needs to be set for finding the actual tables
+ bindingTables: # Binding table rule configurations
+ - <logic_table_name1, logic_table_name2, ...>
+ - <logic_table_name3, logic_table_name4, ...>
+ - <logic_table_name_x, logic_table_name_y, ...>
+ broadcastTables: # Broadcast table rule configurations
+ - table_name1
+ - table_name2
+ - table_name_x
+
+ defaultDataSourceName: # If table not configure at table rule, will route to
defaultDataSourceName
+ defaultDatabaseStrategy: # Default strategy for sharding databases, same as
databases sharding strategy
+ defaultTableStrategy: # Default strategy for sharding tables, same as tables
sharding strategy
+ defaultKeyGeneratorClassName: # Default key generator class name, default
value is `io.shardingsphere.core.keygen.DefaultKeyGenerator`. This class needs
to implement KeyGenerator and requires a no-argument constructor
+
+ masterSlaveRules: # Read-write splitting rule configuration, more details
can reference Read-write splitting part
+ <data_source_name>: # Data sources configuration, need consist with data
source map, multiple `data_source_name` available
+ masterDataSourceName: # more details can reference Read-write splitting
part
+ slaveDataSourceNames: # more details can reference Read-write splitting
part
+ loadBalanceAlgorithmType: # more details can reference Read-write
splitting part
+ loadBalanceAlgorithmClassName: # more details can reference Read-write
splitting part
+ configMap: # User-defined arguments
+ key1: value1
+ key2: value2
+ keyx: valuex
+
+props: # Properties
+ sql.show: # To show SQLs or not, default value: false
+ executor.size: # The number of working threads, default value: CPU count
+ check.table.metadata.enabled: # To check the metadata consistency of all the
tables or not, default value: false
+
+configMap: # User-defined arguments
+ key1: value1
+ key2: value2
+ keyx: valuex
```
### Read-Write Split
@@ -364,31 +388,27 @@ props:
#### Configuration Item Explanation
```yaml
-dataSources:
- ds_master: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_master
- username: root
- password:
- ds_slave0: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave0
- username: root
- password:
- ds_slave1: !!org.apache.commons.dbcp.BasicDataSource
- driverClassName: com.mysql.jdbc.Driver
- url: jdbc:mysql://localhost:3306/ds_slave1
- username: root
- password:
+dataSources: # Ignore data sources configuration, same as sharding
masterSlaveRule:
- name: ds_ms
- masterDataSourceName: ds_master
- slaveDataSourceNames: [ds_slave0, ds_slave1]
- props:
- sql.show: true
- configMap:
- key1: value1
+ name: # Name of master slave data source
+ masterDataSourceName: # Name of master data source
+ slaveDataSourceNames: # Names of Slave data sources
+ - <data_source_name1>
+ - <data_source_name2>
+ - <data_source_name_x>
+ loadBalanceAlgorithmClassName: # Load balance algorithm class name. This
class needs to implement MasterSlaveLoadBalanceAlgorithm and requires a
no-argument constructor
+ loadBalanceAlgorithmType: # Load balance algorithm type, values should be:
`ROUND_ROBIN` or `RANDOM`. Ignore if `loadBalanceAlgorithmClassName` is present
+
+props: # Properties
+ sql.show: # To show SQLs or not, default value: false
+ executor.size: # The number of working threads, default value: CPU count
+ check.table.metadata.enabled: # To check the metadata consistency of all the
tables or not, default value: false
+
+configMap: # User-defined arguments
+ key1: value1
+ key2: value2
+ keyx: valuex
```
Create a `DataSource` through the `YamlMasterSlaveDataSourceFactory` factory
class:
@@ -402,14 +422,21 @@ DataSource dataSource =
MasterSlaveDataSourceFactory.createDataSource(yamlFile);
#### Configuration Item Explanation
```yaml
-# Omit data sharding, Read-Write split configuration.
+dataSources: # Ignore data sources configuration
+shardingRule: # Ignore sharding rule configuration
+masterSlaveRule: # Ignore master slave rule configuration
orchestration:
- name: orchestration_ds
- overwrite: true
- registry:
- namespace: orchestration
- serverLists: localhost:2181
+ name: # Name of orchestration instance
+ overwrite: # Use local configuration to overwrite registry center or not
+ registry: # Registry configuration
+ serverLists: # Registry servers list, multiple split as comma. Example:
host1:2181,host2:2181
+ namespace: # Namespace of registry
+ digest: # Digest for registry. No digest is needed by default.
+ operationTimeoutMilliseconds: # Operation timeout time in milliseconds,
default value is 500 milliseconds
+ maxRetries: # Max number of times to retry, default value is 3
+ retryIntervalMilliseconds: # Time interval in milliseconds on each retry,
default value is 500 milliseconds
+ timeToLiveSeconds: # Time to live in seconds of ephemeral keys, default
value is 60 seconds
```
## ShardingSphere-2.x