集成了分表操作

This commit is contained in:
D 2024-04-24 01:40:39 +08:00
parent 8beac776f8
commit 7f43276d3f
5 changed files with 146 additions and 6 deletions

View File

@ -16,6 +16,20 @@ spring:
url: url:
username: username:
password: password:
sharding:
# 分库分表开关/默认关闭
enabled: false
order1:
enabled: false
url:
username:
password:
order2:
enabled: false
url:
username:
password:
# 初始连接数 # 初始连接数
initialSize: 5 initialSize: 5
# 最小连接池数量 # 最小连接池数量

View File

@ -5,8 +5,7 @@ package com.ruoyi.common.enums;
* *
* @author ruoyi * @author ruoyi
*/ */
public enum DataSourceType public enum DataSourceType {
{
/** /**
* 主库 * 主库
*/ */
@ -15,5 +14,10 @@ public enum DataSourceType
/** /**
* 从库 * 从库
*/ */
SLAVE SLAVE,
/**
* 分库分表
*/
SHARDING
} }

View File

@ -1,7 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" <project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent> <parent>
<artifactId>ruoyi</artifactId> <artifactId>ruoyi</artifactId>
<groupId>com.ruoyi</groupId> <groupId>com.ruoyi</groupId>
@ -35,6 +34,13 @@
<artifactId>druid-spring-boot-3-starter</artifactId> <artifactId>druid-spring-boot-3-starter</artifactId>
</dependency> </dependency>
<!-- sharding-jdbc分库分表 -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>sharding-jdbc-core</artifactId>
<version>4.1.1</version>
</dependency>
<!-- 验证码 --> <!-- 验证码 -->
<dependency> <dependency>
<groupId>pro.fessional</groupId> <groupId>pro.fessional</groupId>

View File

@ -6,6 +6,8 @@ import java.util.Map;
import javax.sql.DataSource; import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.web.servlet.FilterRegistrationBean; import org.springframework.boot.web.servlet.FilterRegistrationBean;
@ -36,6 +38,8 @@ import jakarta.servlet.ServletResponse;
@Configuration @Configuration
public class DruidConfig { public class DruidConfig {
Logger logger = LoggerFactory.getLogger(DruidConfig.class);
@Bean @Bean
@ConfigurationProperties("spring.datasource.druid.master") @ConfigurationProperties("spring.datasource.druid.master")
public DataSource masterDataSource(DruidProperties druidProperties) { public DataSource masterDataSource(DruidProperties druidProperties) {
@ -57,6 +61,7 @@ public class DruidConfig {
Map<Object, Object> targetDataSources = new HashMap<>(); Map<Object, Object> targetDataSources = new HashMap<>();
targetDataSources.put(DataSourceType.MASTER.name(), masterDataSource); targetDataSources.put(DataSourceType.MASTER.name(), masterDataSource);
setDataSource(targetDataSources, DataSourceType.SLAVE.name(), "slaveDataSource"); setDataSource(targetDataSources, DataSourceType.SLAVE.name(), "slaveDataSource");
setDataSource(targetDataSources, DataSourceType.SHARDING.name(), "shardingDataSource");
return new DynamicDataSource(masterDataSource, targetDataSources); return new DynamicDataSource(masterDataSource, targetDataSources);
} }
@ -72,6 +77,7 @@ public class DruidConfig {
DataSource dataSource = SpringUtils.getBean(beanName); DataSource dataSource = SpringUtils.getBean(beanName);
targetDataSources.put(sourceName, dataSource); targetDataSources.put(sourceName, dataSource);
} catch (Exception e) { } catch (Exception e) {
logger.error("Failed to register a data source:{}", beanName);
} }
} }

View File

@ -0,0 +1,110 @@
package com.ruoyi.framework.config;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import javax.sql.DataSource;
import org.apache.shardingsphere.api.config.sharding.KeyGeneratorConfiguration;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.InlineShardingStrategyConfiguration;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.spring.boot3.autoconfigure.DruidDataSourceBuilder;
import com.ruoyi.common.utils.spring.SpringUtils;
import com.ruoyi.framework.config.properties.DruidProperties;
/**
 * Sharding (分库分表) data source configuration.
 *
 * <p>Builds a ShardingSphere 4.x {@code shardingDataSource} bean over two Druid-backed
 * physical databases ({@code order1}, {@code order2}), sharding the {@code sys_order}
 * logical table by {@code user_id} (database) and {@code order_id} (table).
 * Only active when {@code spring.datasource.druid.sharding.enabled=true}.
 *
 * @author ruoyi
 */
@Configuration
@ConditionalOnProperty(prefix = "spring.datasource.druid.sharding", name = "enabled", havingValue = "true")
public class ShardingDataSourceConfig {

    /** SLF4J convention: logger is private, static and final. */
    private static final Logger logger = LoggerFactory.getLogger(ShardingDataSourceConfig.class);

    /**
     * Physical data source for database {@code order1}; properties bound from
     * {@code spring.datasource.druid.sharding.order1.*}.
     */
    @Bean
    @ConfigurationProperties("spring.datasource.druid.sharding.order1")
    @ConditionalOnProperty(prefix = "spring.datasource.druid.sharding.order1", name = "enabled", havingValue = "true")
    public DataSource order1DataSource(DruidProperties druidProperties) {
        DruidDataSource dataSource = DruidDataSourceBuilder.create().build();
        return druidProperties.dataSource(dataSource);
    }

    /**
     * Physical data source for database {@code order2}; properties bound from
     * {@code spring.datasource.druid.sharding.order2.*}.
     */
    @Bean
    @ConfigurationProperties("spring.datasource.druid.sharding.order2")
    @ConditionalOnProperty(prefix = "spring.datasource.druid.sharding.order2", name = "enabled", havingValue = "true")
    public DataSource order2DataSource(DruidProperties druidProperties) {
        DruidDataSource dataSource = DruidDataSourceBuilder.create().build();
        return druidProperties.dataSource(dataSource);
    }

    /**
     * 设置数据源
     *
     * <p>Looks up the bean by name and registers it under {@code sourceName}. A missing
     * bean (e.g. its {@code enabled} flag is false) is tolerated: the failure is logged
     * and the data source is simply not registered.
     *
     * @param targetDataSources 备选数据源集合
     * @param sourceName        数据源名称
     * @param beanName          bean名称
     */
    public void setDataSource(Map<String, DataSource> targetDataSources, String sourceName, String beanName) {
        try {
            DataSource dataSource = SpringUtils.getBean(beanName);
            targetDataSources.put(sourceName, dataSource);
        } catch (Exception e) {
            // Pass the exception as the last argument so SLF4J records the stack trace
            // instead of silently dropping the failure cause.
            logger.error("Failed to register a sharding data source: {}", beanName, e);
        }
    }

    /**
     * Assembles the ShardingSphere data source from the registered physical databases
     * and the {@code sys_order} sharding rule.
     *
     * @return the sharding-aware {@link DataSource}
     * @throws SQLException if ShardingSphere fails to create the data source
     */
    @Bean(name = "shardingDataSource")
    public DataSource shardingDataSource() throws SQLException {
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        // 添加数据源
        setDataSource(dataSourceMap, "order1", "order1DataSource");
        setDataSource(dataSourceMap, "order2", "order2DataSource");

        // 配置分片规则
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();

        // 表规则配置 示例:
        // 添加order1.sys_order_0,order2.sys_order_0,order1.sys_order_1,order2.sys_order_1
        // NOTE(review): logical table was "sys_user", which contradicted the data nodes,
        // sharding key and comments (all sys_order/order_id) — corrected to "sys_order".
        TableRuleConfiguration orderTableRuleConfig = new TableRuleConfiguration(
                "sys_order", "order$->{1..2}.sys_order_$->{0..1}");

        // 配置分库策略
        // 示例: 根据user_id分库,user_id为单数去order1库,偶数去order2库
        orderTableRuleConfig.setDatabaseShardingStrategyConfig(
                new InlineShardingStrategyConfiguration("user_id", "order$->{user_id % 2 + 1}"));

        // 配置分表策略
        // 示例: 根据order_id分表,order_id为偶数分到sys_order_0表,奇数分到sys_order_1表
        orderTableRuleConfig.setTableShardingStrategyConfig(
                new InlineShardingStrategyConfiguration("order_id", "sys_order_$->{order_id % 2}"));

        // 分布式主键 (snowflake IDs for order_id)
        orderTableRuleConfig.setKeyGeneratorConfig(new KeyGeneratorConfiguration("SNOWFLAKE", "order_id"));
        shardingRuleConfig.getTableRuleConfigs().add(orderTableRuleConfig);

        // 获取数据源对象
        return ShardingDataSourceFactory.createDataSource(dataSourceMap, shardingRuleConfig, getProperties());
    }

    /**
     * 系统参数配置
     *
     * <p>Enables SQL logging. Uses {@code setProperty} so the table holds only
     * String values, as the {@link Properties} contract requires.
     */
    private Properties getProperties() {
        Properties shardingProperties = new Properties();
        shardingProperties.setProperty("sql.show", Boolean.TRUE.toString());
        return shardingProperties;
    }
}