abcdwd commented on issue #31637:
URL: https://github.com/apache/shardingsphere/issues/31637#issuecomment-2155977931

    import java.sql.SQLException;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;

    import javax.sql.DataSource;

    // ShardingSphere 5.x imports (exact packages may differ slightly between 5.x releases):
    import org.apache.shardingsphere.driver.api.ShardingSphereDataSourceFactory;
    import org.apache.shardingsphere.infra.config.algorithm.AlgorithmConfiguration;
    import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
    import org.apache.shardingsphere.sharding.api.config.ShardingRuleConfiguration;
    import org.apache.shardingsphere.sharding.api.config.rule.ShardingTableRuleConfiguration;
    import org.apache.shardingsphere.sharding.api.config.strategy.audit.ShardingAuditStrategyConfiguration;
    import org.apache.shardingsphere.sharding.api.config.strategy.keygen.KeyGenerateStrategyConfiguration;
    import org.apache.shardingsphere.sharding.api.config.strategy.sharding.StandardShardingStrategyConfiguration;

    import org.springframework.beans.factory.annotation.Qualifier;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;

    @Configuration
    public class ShardingSphereJDBCConfig {
        @Bean(name = "shardingDataSource")
        public DataSource shardingDataSource(@Qualifier("masterDataSource") DataSource masterDataSource,
                                             @Qualifier("rpaDataSource") DataSource rpaDataSource) throws SQLException {
            Map<String, DataSource> dataSourceMap = new HashMap<>();
            dataSourceMap.put("ds0", masterDataSource);
            dataSourceMap.put("ds1", rpaDataSource);

            // Build the ShardingSphere data source from the physical data sources,
            // the sharding rules and the properties
            return ShardingSphereDataSourceFactory.createDataSource(dataSourceMap, getTableRuleConfigurations(), getProperties());
        }
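
        // Assumed for illustration (not part of the original issue): the two physical data
        // sources referenced by the @Qualifier names above must be defined elsewhere, e.g.
        // as pooled beans. A HikariCP-style sketch of one of them:
//        @Bean(name = "masterDataSource")
//        public DataSource masterDataSource() {
//            com.zaxxer.hikari.HikariDataSource ds = new com.zaxxer.hikari.HikariDataSource();
//            ds.setJdbcUrl("jdbc:mysql://localhost:3306/ds0"); // hypothetical URL
//            ds.setUsername("root");
//            ds.setPassword("password");
//            return ds;
//        }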
       private List<RuleConfiguration> getTableRuleConfigurations() {
           return Collections.singletonList(createShardingRuleConfiguration());
       }
        private ShardingRuleConfiguration createShardingRuleConfiguration() {
            ShardingRuleConfiguration result = new ShardingRuleConfiguration();
            // Auto sharding table rule list (unused here)
    //        result.getAutoTables().add(ShardingAutoTableRuleConfiguration)

            // Sharding table rule list
            result.getTables().add(getOrderTableRuleConfiguration());
            result.getTables().add(getOrderTableRuleConfiguration2());

            // Binding table rule list (unused here)
    //        result.getBindingTableGroups().add(ShardingTableReferenceRuleConfiguration);
    //        result.getBindingTableGroups().add(new ShardingTableReferenceRuleConfiguration("foo", "t_order, t_order_item"));

            // Register the sharding algorithms
            putShardingAlgorithms(result.getShardingAlgorithms());
            // Key generation strategy: snowflake algorithm
            result.getKeyGenerators().put("snowflake", new AlgorithmConfiguration("SNOWFLAKE", new Properties()));
            // Audit strategy: checks that DML statements (INSERT, UPDATE, DELETE) include the
            // required sharding key to keep operations correct; the concrete audit logic is
            // provided by the DML_SHARDING_CONDITIONS algorithm.
            result.getAuditors().put("sharding_key_required_auditor", new AlgorithmConfiguration("DML_SHARDING_CONDITIONS", new Properties()));
            return result;
        }
   
        private void putShardingAlgorithms(Map<String, AlgorithmConfiguration> shardingAlgorithms) {
            // Sharding algorithms: key = custom name, value = AlgorithmConfiguration(algorithm name, properties)
            // INTERVAL: fixed-time-range sharding algorithm (IntervalShardingAlgorithm)
            shardingAlgorithms.put("interval", new AlgorithmConfiguration("INTERVAL", getProps1()));
            // INLINE: row-expression-based sharding algorithm (InlineShardingAlgorithm)
            shardingAlgorithms.put("inline1", new AlgorithmConfiguration("INLINE", getProps2()));
            shardingAlgorithms.put("inline2", new AlgorithmConfiguration("INLINE", getProps3()));
        }
   
        private static Properties getProps1() {
            Properties props1 = new Properties();
            props1.setProperty("datetime-pattern", "yyyy-MM-dd HH:mm:ss");
            // Start of the sharding time range, timestamp format yyyy-MM-dd HH:mm:ss
            props1.setProperty("datetime-lower", "2024-01-01 00:00:00");
            // End of the sharding time range, timestamp format yyyy-MM-dd HH:mm:ss
            props1.setProperty("datetime-upper", "2025-01-01 00:00:00");
            // Suffix pattern of the sharding data sources or actual tables; must follow the
            // Java DateTimeFormatter pattern and stay consistent with datetime-interval-unit,
            // e.g. yyyyMM
            props1.setProperty("sharding-suffix-pattern", "yyyyMM");
            // Time interval of the sharding key; values past the interval move to the next shard
            props1.setProperty("datetime-interval-amount", "6");
            // Unit of the interval; must be a Java ChronoUnit enum value
            props1.setProperty("datetime-interval-unit", "MONTHS");
            return props1;
        }
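
        // Illustration only (hypothetical helper, not part of the ShardingSphere API):
        // with datetime-lower 2024-01-01 00:00:00, a 6-MONTHS interval and suffix pattern
        // yyyyMM, the interval starts are 2024-01-01, 2024-07-01 and 2025-01-01, so e.g.
        // create_time 2024-08-15 10:00:00 routes to suffix 202407. A rough sketch of that
        // mapping, assuming the algorithm floors the value to its interval start:
        private static String exampleSuffixFor(java.time.LocalDateTime createTime) {
            java.time.LocalDateTime lower = java.time.LocalDateTime.of(2024, 1, 1, 0, 0);
            long intervals = java.time.temporal.ChronoUnit.MONTHS.between(lower, createTime) / 6;
            return lower.plusMonths(intervals * 6)
                    .format(java.time.format.DateTimeFormatter.ofPattern("yyyyMM"));
        }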
   
       private static Properties getProps2() {
           Properties props = new Properties();
           props.setProperty("algorithm-expression", "ds$->{user_id % 2}");
           return props;
       }
   
        private static Properties getProps3() {
            Properties props = new Properties();
            props.setProperty("algorithm-expression", "sys_order_$->{order_id % 2}");
            return props;
        }
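
        // Illustration only: the INLINE expressions above are Groovy row expressions;
        // e.g. user_id = 7 evaluates "ds$->{user_id % 2}" to "ds1", and order_id = 12
        // evaluates "sys_order_$->{order_id % 2}" to "sys_order_0". A minimal sketch
        // of the same routing in plain Java:
        private static String exampleTargetFor(long userId, long orderId) {
            return "ds" + (userId % 2) + ".sys_order_" + (orderId % 2);
        }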
   
        //sys_order_test
        private ShardingTableRuleConfiguration getOrderTableRuleConfiguration2() {
            ShardingTableRuleConfiguration result = new ShardingTableRuleConfiguration("sys_order_test",
                    "ds$->{0}.sys_order_test_$->{[202401,202407,202501]}");
            result.setKeyGenerateStrategy(new KeyGenerateStrategyConfiguration("order_id", "snowflake"));
            result.setAuditStrategy(new ShardingAuditStrategyConfiguration(Collections.singleton("sharding_key_required_auditor"), false));
            result.setTableShardingStrategy(new StandardShardingStrategyConfiguration("create_time", "interval"));
            return result;
        }
   
        //sys_order
        private ShardingTableRuleConfiguration getOrderTableRuleConfiguration() {
            ShardingTableRuleConfiguration result = new ShardingTableRuleConfiguration("sys_order",
                    "ds$->{0..1}.sys_order_$->{0..1}");
            result.setKeyGenerateStrategy(new KeyGenerateStrategyConfiguration("order_id", "snowflake"));
            result.setAuditStrategy(new ShardingAuditStrategyConfiguration(Collections.singleton("sharding_key_required_auditor"), false));
            result.setDatabaseShardingStrategy(new StandardShardingStrategyConfiguration("user_id", "inline1"));
            result.setTableShardingStrategy(new StandardShardingStrategyConfiguration("order_id", "inline2"));
            return result;
        }
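
        // Illustration only: "ds$->{0..1}.sys_order_$->{0..1}" expands to four nodes:
        // ds0.sys_order_0, ds0.sys_order_1, ds1.sys_order_0 and ds1.sys_order_1.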
       
        private Properties getProperties() {
            Properties shardingProperties = new Properties();
            // Enable SQL logging; the property key is "sql-show" in ShardingSphere 5.x
            // ("sql.show" was the 4.x key)
            shardingProperties.setProperty("sql-show", Boolean.TRUE.toString());
            return shardingProperties;
        }
   }
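
For reference, a minimal sketch of how the resulting bean could be consumed (the DAO class, table columns and SQL below are hypothetical, not from this issue): inserts into sys_order are routed by user_id and order_id through the inline algorithms, and inserts into sys_order_test by create_time through the interval algorithm.

    import javax.sql.DataSource;

    import org.springframework.beans.factory.annotation.Qualifier;
    import org.springframework.jdbc.core.JdbcTemplate;
    import org.springframework.stereotype.Service;

    @Service
    public class OrderDao {
        private final JdbcTemplate jdbcTemplate;

        public OrderDao(@Qualifier("shardingDataSource") DataSource dataSource) {
            this.jdbcTemplate = new JdbcTemplate(dataSource);
        }

        public void insertOrder(long orderId, long userId) {
            // Routed to ds{user_id % 2}.sys_order_{order_id % 2} by the inline algorithms
            jdbcTemplate.update("INSERT INTO sys_order (order_id, user_id) VALUES (?, ?)", orderId, userId);
        }
    }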

