lolkt opened a new issue, #37767:
URL: https://github.com/apache/shardingsphere/issues/37767
This setup combines dynamic data sources with ShardingSphere read/write splitting. Database and table sharding may be needed in the future, so the configuration is stored in Nacos.
```xml
<!-- ShardingSphere -->
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>shardingsphere-jdbc-core</artifactId>
    <version>5.4.1</version>
</dependency>
<dependency>
    <groupId>org.yaml</groupId>
    <artifactId>snakeyaml</artifactId>
    <version>1.33</version>
</dependency>
```
Nacos configuration:

```yaml
spring:
  application:
    name: c-user
  # datasource
  datasource:
    dynamic:
      primary: sharding
      strict: false
      datasource:
        uid:
          driver-class-name: com.mysql.cj.jdbc.Driver
          url: jdbc:mysql://192.168.1.116:3307/uid_center?useSSL=false
          username: root
          password: dmyz@2025#
      druid:
        initial-size: 5
        min-idle: 5
        max-active: 200
        max-wait: 3000
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 400000
        validation-query: SELECT 1 FROM DUAL
        test-while-idle: true
        test-on-borrow: false
        test-on-return: false
        pool-prepared-statements: false
        max-pool-prepared-statement-per-connection-size: -1
        filter:
          stat:
            enabled: true
          slf4j:
            enabled: true
          wall:
            enabled: true
            config:
              comment-allow: true
        connection-properties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500
        web-stat-filter:
          enabled: true
          url-pattern: /*
          exclusions: "*.js,*.gif,*.png,*.jpg,*.css,*.ico,/druid/*"

# ShardingSphere 5.x read/write splitting configuration
sharding:
  enabled: true
  config:
    # Key setting: the logical database name
    databaseName: cornerstone_user
    mode:
      type: Standalone
      repository:
        type: JDBC
    dataSources:
      ds-master:
        dataSourceClassName: com.zaxxer.hikari.HikariDataSource
        driverClassName: com.mysql.cj.jdbc.Driver
        jdbcUrl: jdbc:mysql://192.168.1.116:3307/cornerstone_user?useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai
        username: root
        password: dmyz@2025#
      ds-slave0:
        dataSourceClassName: com.zaxxer.hikari.HikariDataSource
        driverClassName: com.mysql.cj.jdbc.Driver
        jdbcUrl: jdbc:mysql://192.168.1.116:3308/cornerstone_user?useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai
        username: root
        password: dmyz@2025#
      ds-slave1:
        dataSourceClassName: com.zaxxer.hikari.HikariDataSource
        driverClassName: com.mysql.cj.jdbc.Driver
        jdbcUrl: jdbc:mysql://192.168.1.116:3308/cornerstone_user?useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai
        username: root
        password: dmyz@2025#
    rules:
      readwriteSplitting:
        dataSources:
          readwrite_ds:
            writeDataSourceName: ds-master
            readDataSourceNames:
              - ds-slave0
              - ds-slave1
            loadBalancerName: round_robin
        loadBalancers:
          round_robin:
            type: ROUND_ROBIN
      # Single-table rule: non-sharded tables go through read/write splitting
      single:
        defaultDataSource: readwrite_ds
    props:
      sql-show: true
      # Key setting: disable the table metadata check so tables can be discovered dynamically
      check-table-metadata-enabled: false
      # or enable metadata loading
      # metadata-loader-enabled: true
```
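
For reference, the `sharding.config` subtree is close to ShardingSphere's native YAML format (the native format writes rules as a tagged list such as `- !READWRITE_SPLITTING` rather than the map-style `rules:` used above). If the content were stored in Nacos as a native-format YAML document, it could in principle be handed to the built-in YAML factory instead of being rebuilt field by field as in the classes below. A minimal sketch, not part of the issue's actual setup:

```java
import java.nio.charset.StandardCharsets;
import javax.sql.DataSource;
import org.apache.shardingsphere.driver.api.yaml.YamlShardingSphereDataSourceFactory;

public class YamlFactorySketch {

    /**
     * Builds a ShardingSphere data source from a native-format YAML document
     * (databaseName, mode, dataSources, rules, props), e.g. the Nacos content
     * serialized back to YAML. Hypothetical helper, not from the issue.
     */
    public static DataSource fromYaml(String shardingYaml) throws Exception {
        return YamlShardingSphereDataSourceFactory.createDataSource(
                shardingYaml.getBytes(StandardCharsets.UTF_8));
    }
}
```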
Java:

```java
@Slf4j
@Configuration
@AutoConfigureBefore({DynamicDataSourceAutoConfiguration.class, SpringBootConfiguration.class})
public class MyDataSourceConfiguration {

    private final DynamicDataSourceProperties properties;

    /**
     * ShardingSphere data source (optional; present when sharding.enabled=true).
     */
    @Lazy
    @Autowired
    private DataSource shardingDataSource;

    public MyDataSourceConfiguration(DynamicDataSourceProperties properties) {
        this.properties = properties;
    }

    @Bean
    public DynamicDataSourceProvider dynamicDataSourceProvider() {
        return () -> {
            Map<String, DataSource> dataSourceMap = new HashMap<>(4);
            // Register the ShardingSphere data source if it exists
            if (shardingDataSource != null) {
                log.info("Registering ShardingSphere datasource as 'sharding'");
                dataSourceMap.put("sharding", shardingDataSource);
            } else {
                log.info("ShardingSphere datasource not found, skipping registration");
            }
            return dataSourceMap;
        };
    }

    /**
     * Mark the dynamic routing data source as primary.
     * When multiple DataSource beans exist, the primary one is the one Spring injects by default.
     */
    @Primary
    @Bean
    public DataSource dataSource() {
        DynamicRoutingDataSource dataSource = new DynamicRoutingDataSource();
        dataSource.setPrimary(properties.getPrimary());
        dataSource.setStrict(properties.getStrict());
        dataSource.setStrategy(properties.getStrategy());
        dataSource.setP6spy(properties.getP6spy());
        dataSource.setSeata(properties.getSeata());
        return dataSource;
    }
}
```
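
For context on how this routing is meant to be used: with `primary: sharding`, beans that do not declare `@DS` go through the ShardingSphere data source registered above, while `@DS("uid")` switches to the plain `uid` pool. A minimal usage sketch, assuming the standard dynamic-datasource `@DS` annotation (the service and table names are illustrative, not from the issue):

```java
import com.baomidou.dynamic.datasource.annotation.DS;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Service;

@Service
public class UserQueryService {

    private final JdbcTemplate jdbcTemplate;

    public UserQueryService(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    // No @DS: routed to the primary "sharding" data source, so reads should be
    // load-balanced across ds-slave0 / ds-slave1 by the readwrite_ds rule.
    public Integer countUsers() {
        return jdbcTemplate.queryForObject("SELECT COUNT(*) FROM t_user", Integer.class);
    }

    // Explicitly switch to the uid data source registered by dynamic-datasource.
    @DS("uid")
    public Long nextUid() {
        return jdbcTemplate.queryForObject("SELECT 1", Long.class);
    }
}
```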
```java
@Data
@Slf4j
@Configuration
@ConfigurationProperties(prefix = "sharding")
public class ShardingJdbcConfig {

    // Configuration key constants
    private static final String KEY_DATA_SOURCES = "dataSources";
    private static final String KEY_RULES = "rules";
    private static final String KEY_SHARDING = "sharding";
    private static final String KEY_READWRITE_SPLITTING = "readwriteSplitting";
    private static final String KEY_SINGLE = "single";
    private static final String KEY_TABLES = "tables";
    private static final String KEY_STANDARD = "standard";
    private static final String KEY_SHARDING_COLUMN = "shardingColumn";
    private static final String KEY_SHARDING_ALGORITHM_NAME = "shardingAlgorithmName";
    private static final String KEY_TYPE = "type";
    private static final String KEY_PROPS = "props";

    /**
     * Whether ShardingSphere is enabled.
     */
    private boolean enabled = false;

    /**
     * ShardingSphere configuration.
     */
    private Map<String, Object> config;

    /**
     * Business data source - ShardingSphere 5.x.
     */
    @Bean
    public DataSource shardingDataSource() throws SQLException {
        if (config == null || config.isEmpty()) {
            throw new IllegalArgumentException("sharding.config must be configured");
        }
        log.info("ShardingSphere enabled, creating datasource from sharding.config");
        // Resolve the database name
        String databaseName = getDatabaseName();
        log.info("ShardingSphere database name: {}", databaseName);
        // Parse the data source configuration
        Map<String, DataSource> dataSourceMap = createDataSources();
        // Parse the rule configuration
        Collection<RuleConfiguration> ruleConfigs = createRuleConfigurations();
        // Parse the properties
        Properties props = createProperties();
        // Use the API variant that accepts a database name
        return ShardingSphereDataSourceFactory.createDataSource(databaseName, dataSourceMap, ruleConfigs, props);
    }

    /**
     * Resolve the database name.
     * Priority: config.databaseName > props.default-database-name > parsed from jdbcUrl.
     */
    private String getDatabaseName() {
        // 1. From config.databaseName
        if (config.containsKey("databaseName")) {
            return (String) config.get("databaseName");
        }
        // 2. From props.default-database-name
        @SuppressWarnings("unchecked")
        Map<String, Object> props = (Map<String, Object>) config.get("props");
        if (props != null && props.containsKey("default-database-name")) {
            return (String) props.get("default-database-name");
        }
        // 3. Parse from the first data source's jdbcUrl
        @SuppressWarnings("unchecked")
        Map<String, Object> dataSources = (Map<String, Object>) config.get(KEY_DATA_SOURCES);
        if (dataSources != null && !dataSources.isEmpty()) {
            Map.Entry<String, Object> firstEntry = dataSources.entrySet().iterator().next();
            @SuppressWarnings("unchecked")
            Map<String, Object> dsConfig = (Map<String, Object>) firstEntry.getValue();
            String jdbcUrl = (String) dsConfig.get("jdbcUrl");
            if (jdbcUrl != null) {
                // Extract the database from jdbc:mysql://host:port/database?params
                int lastSlash = jdbcUrl.lastIndexOf('/');
                int questionMark = jdbcUrl.indexOf('?', lastSlash);
                if (lastSlash > 0) {
                    if (questionMark > 0) {
                        return jdbcUrl.substring(lastSlash + 1, questionMark);
                    } else {
                        return jdbcUrl.substring(lastSlash + 1);
                    }
                }
            }
        }
        // 4. Fallback
        log.warn("Cannot determine database name, using default: logic_db");
        return "logic_db";
    }

    @SuppressWarnings("unchecked")
    private Map<String, DataSource> createDataSources() {
        Map<String, DataSource> result = new LinkedHashMap<>();
        Map<String, Object> dataSources = (Map<String, Object>) config.get(KEY_DATA_SOURCES);
        if (dataSources != null) {
            for (Map.Entry<String, Object> entry : dataSources.entrySet()) {
                Map<String, Object> dsConfig = (Map<String, Object>) entry.getValue();
                result.put(entry.getKey(), createHikariDataSource(dsConfig));
            }
        }
        return result;
    }

    private DataSource createHikariDataSource(Map<String, Object> dsConfig) {
        com.zaxxer.hikari.HikariDataSource ds = new com.zaxxer.hikari.HikariDataSource();
        ds.setDriverClassName((String) dsConfig.get("driverClassName"));
        ds.setJdbcUrl((String) dsConfig.get("jdbcUrl"));
        ds.setUsername((String) dsConfig.get("username"));
        ds.setPassword((String) dsConfig.get("password"));
        return ds;
    }

    @SuppressWarnings("unchecked")
    private Collection<RuleConfiguration> createRuleConfigurations() {
        List<RuleConfiguration> result = new ArrayList<>();
        Map<String, Object> rules = (Map<String, Object>) config.get(KEY_RULES);
        log.info("ShardingSphere rules keys: {}", rules != null ? rules.keySet() : "null");
        if (rules != null) {
            // Read/write splitting rule
            if (rules.containsKey(KEY_READWRITE_SPLITTING)) {
                log.info("Creating ReadwriteSplittingRuleConfiguration...");
                result.add(createReadwriteSplittingRuleConfiguration((Map<String, Object>) rules.get(KEY_READWRITE_SPLITTING)));
            }
            // Sharding rule
            if (rules.containsKey(KEY_SHARDING)) {
                log.info("Creating ShardingRuleConfiguration...");
                result.add(createShardingRuleConfiguration((Map<String, Object>) rules.get(KEY_SHARDING)));
            }
            // Single-table rule - routes non-sharded tables
            if (rules.containsKey(KEY_SINGLE)) {
                log.info("Creating SingleRuleConfiguration...");
                result.add(createSingleRuleConfiguration((Map<String, Object>) rules.get(KEY_SINGLE)));
            }
        }
        log.info("Total rule configurations created: {}", result.size());
        return result;
    }

    @SuppressWarnings("unchecked")
    private SingleRuleConfiguration createSingleRuleConfiguration(Map<String, Object> singleConfig) {
        SingleRuleConfiguration result = new SingleRuleConfiguration();
        // Set the default data source
        if (singleConfig.containsKey("defaultDataSource")) {
            result.setDefaultDataSource((String) singleConfig.get("defaultDataSource"));
        }
        // Set the table list
        if (singleConfig.containsKey("tables")) {
            Object tables = singleConfig.get("tables");
            if (tables instanceof List) {
                result.getTables().addAll((List<String>) tables);
            }
        }
        log.info("Created SingleRuleConfiguration with defaultDataSource: {}", result.getDefaultDataSource());
        return result;
    }

    @SuppressWarnings("unchecked")
    private ShardingRuleConfiguration createShardingRuleConfiguration(Map<String, Object> shardingConfig) {
        ShardingRuleConfiguration result = new ShardingRuleConfiguration();
        // Table rules
        Map<String, Object> tables = (Map<String, Object>) shardingConfig.get(KEY_TABLES);
        if (tables != null) {
            for (Map.Entry<String, Object> entry : tables.entrySet()) {
                ShardingTableRuleConfiguration tableRule = getShardingTableRuleConfiguration(entry);
                result.getTables().add(tableRule);
            }
        }
        // Sharding algorithms
        Map<String, Object> algorithms = (Map<String, Object>) shardingConfig.get("shardingAlgorithms");
        if (algorithms != null) {
            for (Map.Entry<String, Object> entry : algorithms.entrySet()) {
                Map<String, Object> algConfig = (Map<String, Object>) entry.getValue();
                Properties algProps = new Properties();
                Map<String, Object> props = (Map<String, Object>) algConfig.get(KEY_PROPS);
                if (props != null) {
                    algProps.putAll(props);
                }
                result.getShardingAlgorithms().put(entry.getKey(),
                        new AlgorithmConfiguration((String) algConfig.get(KEY_TYPE), algProps));
            }
        }
        return result;
    }

    /**
     * Database and table sharding strategies.
     *
     * @param entry table rule entry
     * @return the table rule configuration
     */
    @NotNull
    private static ShardingTableRuleConfiguration getShardingTableRuleConfiguration(Map.Entry<String, Object> entry) {
        Map<String, Object> tableConfig = (Map<String, Object>) entry.getValue();
        ShardingTableRuleConfiguration tableRule = new ShardingTableRuleConfiguration(
                entry.getKey(),
                (String) tableConfig.get("actualDataNodes")
        );
        // Database sharding strategy
        Map<String, Object> dbStrategy = (Map<String, Object>) tableConfig.get("databaseStrategy");
        if (dbStrategy != null && dbStrategy.containsKey(KEY_STANDARD)) {
            Map<String, Object> standard = (Map<String, Object>) dbStrategy.get(KEY_STANDARD);
            tableRule.setDatabaseShardingStrategy(new StandardShardingStrategyConfiguration(
                    (String) standard.get(KEY_SHARDING_COLUMN),
                    (String) standard.get(KEY_SHARDING_ALGORITHM_NAME)
            ));
        }
        // Table sharding strategy
        Map<String, Object> tableStrategy = (Map<String, Object>) tableConfig.get("tableStrategy");
        if (tableStrategy != null && tableStrategy.containsKey(KEY_STANDARD)) {
            Map<String, Object> standard = (Map<String, Object>) tableStrategy.get(KEY_STANDARD);
            tableRule.setTableShardingStrategy(new StandardShardingStrategyConfiguration(
                    (String) standard.get(KEY_SHARDING_COLUMN),
                    (String) standard.get(KEY_SHARDING_ALGORITHM_NAME)
            ));
        }
        return tableRule;
    }

    @SuppressWarnings("unchecked")
    private ReadwriteSplittingRuleConfiguration createReadwriteSplittingRuleConfiguration(Map<String, Object> rwConfig) {
        List<ReadwriteSplittingDataSourceRuleConfiguration> dataSources = new ArrayList<>();
        Map<String, AlgorithmConfiguration> loadBalancers = new LinkedHashMap<>();
        Map<String, Object> dsConfigs = (Map<String, Object>) rwConfig.get("dataSources");
        if (dsConfigs != null) {
            for (Map.Entry<String, Object> entry : dsConfigs.entrySet()) {
                Map<String, Object> dsConfig = (Map<String, Object>) entry.getValue();
                // readDataSourceNames may arrive as a List or a Map
                List<String> readDataSourceNames = new ArrayList<>();
                Object readDs = dsConfig.get("readDataSourceNames");
                if (readDs instanceof List) {
                    readDataSourceNames = (List<String>) readDs;
                } else if (readDs instanceof Map) {
                    // When the YAML array is bound as a Map, take its values
                    readDataSourceNames = new ArrayList<>(((Map<String, String>) readDs).values());
                }
                dataSources.add(new ReadwriteSplittingDataSourceRuleConfiguration(
                        entry.getKey(),
                        (String) dsConfig.get("writeDataSourceName"),
                        readDataSourceNames,
                        (String) dsConfig.get("loadBalancerName")
                ));
            }
        }
        Map<String, Object> lbConfigs = (Map<String, Object>) rwConfig.get("loadBalancers");
        if (lbConfigs != null) {
            for (Map.Entry<String, Object> entry : lbConfigs.entrySet()) {
                Map<String, Object> lbConfig = (Map<String, Object>) entry.getValue();
                loadBalancers.put(entry.getKey(),
                        new AlgorithmConfiguration((String) lbConfig.get("type"), new Properties()));
            }
        }
        return new ReadwriteSplittingRuleConfiguration(dataSources, loadBalancers);
    }

    @SuppressWarnings("unchecked")
    private Properties createProperties() {
        Properties result = new Properties();
        Map<String, Object> props = (Map<String, Object>) config.get("props");
        if (props != null) {
            result.putAll(props);
        }
        return result;
    }
}
```
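
One way to confirm where statements actually land: with `sql-show: true`, ShardingSphere logs each routed statement as `Actual SQL: <data-source-name> ::: ...`. Below is a minimal check along those lines against the data source built above (the class and table names are made up for illustration, not part of the issue):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import javax.sql.DataSource;

public final class ReadwriteRoutingSmokeCheck {

    /**
     * Runs one read and one write through the ShardingSphere data source so the
     * sql-show output reveals which physical data source each statement hit.
     */
    public static void check(DataSource shardingDataSource) throws Exception {
        try (Connection conn = shardingDataSource.getConnection();
             Statement stmt = conn.createStatement()) {
            // Expected in the log as "Actual SQL: ds-slave0 ::: ..." or "ds-slave1 ::: ..."
            try (ResultSet rs = stmt.executeQuery("SELECT 1")) {
                rs.next();
            }
            // Writes are expected in the log as "Actual SQL: ds-master ::: ..."
            stmt.executeUpdate("UPDATE t_demo SET updated_at = NOW() WHERE id = -1");
        }
    }
}
```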
<img width="1663" height="1268" alt="Image" src="https://github.com/user-attachments/assets/194d740a-2019-4e0f-b0c3-a0dbdda1546b" />